| code (string, 87-55.2k chars) | code_codestyle (int64, 0-349) | style_context (string, 135-49.1k chars) | style_context_codestyle (int64, 0-349) | label (int64, 0-1) |
|---|---|---|---|---|
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swinv2"] = [
        "SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Swinv2ForImageClassification",
        "Swinv2ForMaskedImageModeling",
        "Swinv2Model",
        "Swinv2PreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swinv2 import (
            SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Swinv2ForImageClassification,
            Swinv2ForMaskedImageModeling,
            Swinv2Model,
            Swinv2PreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
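The `_import_structure`/`_LazyModule` pattern above (and in several rows below) defers heavy submodule imports until first attribute access. As a rough illustration of the idea only -- the real `transformers._LazyModule` is more involved -- a minimal stand-in might look like this:

# Minimal sketch of lazy submodule loading (illustrative only).
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure
        # map each exported symbol back to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, attr):
        if attr in self._symbol_to_module:
            # import the defining submodule only on first attribute access
            submodule = importlib.import_module(
                "." + self._symbol_to_module[attr], self.__name__
            )
            value = getattr(submodule, attr)
            setattr(self, attr, value)  # cache for subsequent lookups
            return value
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")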
| 101 | '''simple docstring'''
import os
import shutil
import tempfile
import unittest

import numpy as np

from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow


@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())

    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())

    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )

        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }

        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from npz file
        tmpfilename = os.path.join(self.tmpdirname, "file.npz")
        np.savez(tmpfilename, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=tmpfilename)

        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())

        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)

        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 67 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_glpn"] = [
        "GLPN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "GLPNForDepthEstimation",
        "GLPNLayer",
        "GLPNModel",
        "GLPNPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_glpn import GLPNFeatureExtractor
        from .image_processing_glpn import GLPNImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_glpn import (
            GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
            GLPNForDepthEstimation,
            GLPNLayer,
            GLPNModel,
            GLPNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 102 | '''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vit_msn"] = [
        "VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ViTMSNModel",
        "ViTMSNForImageClassification",
        "ViTMSNPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vit_msn import (
            VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
            ViTMSNForImageClassification,
            ViTMSNModel,
            ViTMSNPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 0 |
import warnings

from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding


class AltCLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("XLMRobertaTokenizer", "XLMRobertaTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
| 103 | '''simple docstring'''
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Splits `x` into sentences, one per line (used when scoring ROUGE-Lsum)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 67 | 0 |
'''simple docstring'''
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f


def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results


def is_cuda_and_apex_available():
    is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
    return is_using_cuda and is_apex_available()


stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)


class ExamplesTestsNoTrainer(TestCasePlus):
    @classmethod
    def setUpClass(cls):
        # Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
        cls.tmpdir = tempfile.mkdtemp()
        cls.configPath = os.path.join(cls.tmpdir, "default_config.yml")
        write_basic_config(save_location=cls.configPath)
        cls._launch_args = ["accelerate", "launch", "--config_file", cls.configPath]

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(cls.tmpdir)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_glue_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --seed=42
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "glue_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_clm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --block_size 128
            --per_device_train_batch_size 5
            --per_device_eval_batch_size 5
            --num_train_epochs 2
            --output_dir {tmp_dir}
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        if torch.cuda.device_count() > 1:
            # Skipping because there are not enough batches to train the model + would need a drop_last to work.
            return

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 100)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "clm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_mlm_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --num_train_epochs=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertLess(result["perplexity"], 42)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "mlm_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_ner_no_trainer(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.75)
        self.assertLess(result["train_loss"], 0.5)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "ner_no_trainer")))

    @unittest.skip(reason="Fix me @muellerzr")
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_squad_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --seed=42
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
        self.assertGreaterEqual(result["eval_f1"], 28)
        self.assertGreaterEqual(result["eval_exact"], 28)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "qa_no_trainer")))

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_swag_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/swag/sample.json
            --validation_file tests/fixtures/tests_samples/swag/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=20
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_accuracy"], 0.8)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "swag_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_summarization_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_rouge1"], 10)
        self.assertGreaterEqual(result["eval_rouge2"], 2)
        self.assertGreaterEqual(result["eval_rougeL"], 7)
        self.assertGreaterEqual(result["eval_rougeLsum"], 7)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "summarization_no_trainer")))

    @slow
    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_translation_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
            --model_name_or_path sshleifer/student_marian_en_ro_6_1
            --source_lang en
            --target_lang ro
            --train_file tests/fixtures/tests_samples/wmt16/sample.json
            --validation_file tests/fixtures/tests_samples/wmt16/sample.json
            --output_dir {tmp_dir}
            --max_train_steps=50
            --num_warmup_steps=8
            --num_beams=6
            --learning_rate=3e-3
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --source_lang en_XX
            --target_lang ro_RO
            --checkpointing_steps epoch
            --with_tracking
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_bleu"], 30)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "epoch_0")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "translation_no_trainer")))

    @slow
    def test_run_semantic_segmentation_no_trainer(self):
        stream_handler = logging.StreamHandler(sys.stdout)
        logger.addHandler(stream_handler)

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
            --dataset_name huggingface/semantic-segmentation-test-sample
            --output_dir {tmp_dir}
            --max_train_steps=10
            --num_warmup_steps=2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --checkpointing_steps epoch
        """.split()

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        self.assertGreaterEqual(result["eval_overall_accuracy"], 0.10)

    @mock.patch.dict(os.environ, {"WANDB_MODE": "offline"})
    def test_run_image_classification_no_trainer(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            {self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
            --model_name_or_path google/vit-base-patch16-224-in21k
            --dataset_name hf-internal-testing/cats_vs_dogs_sample
            --learning_rate 1e-4
            --per_device_train_batch_size 2
            --per_device_eval_batch_size 1
            --max_train_steps 2
            --train_val_split 0.1
            --seed 42
            --output_dir {tmp_dir}
            --with_tracking
            --checkpointing_steps 1
        """.split()

        if is_cuda_and_apex_available():
            testargs.append("--fp16")

        run_command(self._launch_args + testargs)
        result = get_results(tmp_dir)
        # The base model scores a 25%
        self.assertGreaterEqual(result["eval_accuracy"], 0.6)
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "step_1")))
        self.assertTrue(os.path.exists(os.path.join(tmp_dir, "image_classification_no_trainer")))
| 104 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
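A brief usage sketch (the defaults above mirror abeja/gpt-neox-japanese-2.7b, and any field can be overridden at construction time; the values below are arbitrary):

config = GPTNeoXJapaneseConfig(hidden_size=512, num_hidden_layers=4)
print(config.hidden_size, config.rotary_emb_base)  # 512 10000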
| 67 | 0 |
"""simple docstring"""
from torch import nn


class ClassificationHead(nn.Module):
    """Classification head for transformer encoders."""

    def __init__(self, class_size, embed_size):
        super().__init__()
        self.class_size = class_size
        self.embed_size = embed_size
        # self.mlp1 = nn.Linear(embed_size, embed_size)
        # self.mlp2 = (nn.Linear(embed_size, class_size))
        self.mlp = nn.Linear(embed_size, class_size)

    def forward(self, hidden_state):
        # hidden_state = nn.functional.relu(self.mlp1(hidden_state))
        # hidden_state = self.mlp2(hidden_state)
        logits = self.mlp(hidden_state)
        return logits
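A minimal usage sketch of the head above; the batch size, embedding width, and class count here are made up for illustration:

import torch

head = ClassificationHead(class_size=5, embed_size=768)
hidden_state = torch.randn(4, 768)  # four examples, 768-dim embeddings
logits = head(hidden_state)         # shape (4, 5): one score per class
print(logits.shape)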
| 105 | '''simple docstring'''
def add(first: int, second: int) -> int:
    """Addition of two integers using only bitwise operators."""
    while second != 0:
        # carry bits are where both numbers have a 1; XOR adds without carrying
        c = first & second
        first ^= second
        second = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
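The loop works because `first & second` isolates the carry bits while `first ^= second` adds without carrying; shifting the carry left by one re-injects it at the right position. A worked trace:

# Worked trace of add(5, 3):
#   first=0b101, second=0b011 -> carry=0b001, first becomes 0b110, second=0b010
#   first=0b110, second=0b010 -> carry=0b010, first becomes 0b100, second=0b100
#   first=0b100, second=0b100 -> carry=0b100, first becomes 0b000, second=0b1000
#   first=0b000, second=0b1000 -> carry=0,    first becomes 0b1000, second=0
# The loop ends with first == 8 == 5 + 3.
print(add(5, 3))  # 8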
| 67 | 0 |
"""simple docstring"""
from ...utils import (
    OptionalDependencyNotAvailable,
    is_flax_available,
    is_torch_available,
    is_transformers_available,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline


if is_transformers_available() and is_flax_available():
    from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 106 | '''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 0 |
ks = range(2, 20 + 1)
base = [10**k for k in range(ks[-1] + 1)]
memo: dict[int, dict[int, list[list[int]]]] = {}


def next_term(a_i, k, i, n):
    """
    Calculates and updates a_i in place to either the n-th term or the
    smallest term for which c > 10^k, when the terms are written in the form
    a(i) = b * 10^k + c.  Returns the difference added and the number of
    terms jumped.
    """
    # ds_b - digitsum(b); c - the low k digits of a_i
    ds_b = sum(a_i[j] for j in range(k, len(a_i)))
    c = sum(a_i[j] * base[j] for j in range(min(len(a_i), k)))

    diff, dn = 0, 0
    max_dn = n - i

    sub_memo = memo.get(ds_b)

    if sub_memo is not None:
        jumps = sub_memo.get(c)

        if jumps is not None and len(jumps) > 0:
            # find and make the largest jump without going over
            max_jump = -1
            for _k in range(len(jumps) - 1, -1, -1):
                if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
                    max_jump = _k
                    break

            if max_jump >= 0:
                diff, dn, _kk = jumps[max_jump]
                # since the difference between jumps is cached, add c
                new_c = diff + c
                for j in range(min(k, len(a_i))):
                    new_c, a_i[j] = divmod(new_c, 10)
                if new_c > 0:
                    add(a_i, k, new_c)
        else:
            sub_memo[c] = []
    else:
        sub_memo = {c: []}
        memo[ds_b] = sub_memo

    if dn >= max_dn or c + diff >= base[k]:
        return diff, dn

    if k > ks[0]:
        while True:
            # keep doing smaller jumps
            _diff, terms_jumped = next_term(a_i, k - 1, i + dn, n)
            diff += _diff
            dn += terms_jumped

            if dn >= max_dn or c + diff >= base[k]:
                break
    else:
        # would be too small a jump, just compute sequential terms instead
        _diff, terms_jumped = compute(a_i, k, i + dn, n)
        diff += _diff
        dn += terms_jumped

    jumps = sub_memo[c]

    # keep jumps sorted by # of terms skipped
    j = 0
    while j < len(jumps):
        if jumps[j][1] > dn:
            break
        j += 1

    # cache the jump for this value digitsum(b) and c
    sub_memo[c].insert(j, (diff, dn, k))

    return (diff, dn)


def compute(a_i, k, i, n):
    """Computes terms sequentially until the n-th term or until c > 10^k."""
    if i >= n:
        return 0, i
    if k > len(a_i):
        a_i.extend([0 for _ in range(k - len(a_i))])

    # note: a_i -> b * 10^k + c
    # ds_b -> digitsum(b)
    # ds_c -> digitsum(c)
    start_i = i
    ds_b, ds_c, diff = 0, 0, 0
    for j in range(len(a_i)):
        if j >= k:
            ds_b += a_i[j]
        else:
            ds_c += a_i[j]

    while i < n:
        i += 1
        addend = ds_c + ds_b
        diff += addend
        ds_c = 0
        for j in range(k):
            s = a_i[j] + addend
            addend, a_i[j] = divmod(s, 10)
            ds_c += a_i[j]

        if addend > 0:
            break

    if addend > 0:
        add(a_i, k, addend)
    return diff, i - start_i


def add(digits, k, addend):
    """Adds addend to the digit array in place, starting at index k."""
    for j in range(k, len(digits)):
        s = digits[j] + addend
        if s >= 10:
            quotient, digits[j] = divmod(s, 10)
            addend = addend // 10 + quotient
        else:
            digits[j] = s
            addend = addend // 10

        if addend == 0:
            break

    while addend > 0:
        addend, digit = divmod(addend, 10)
        digits.append(digit)


def solution(n: int = 10**15) -> int:
    """Returns the n-th term of the sequence a(1) = 1; a(n+1) = a(n) + digitsum(a(n))."""
    digits = [1]
    i = 1
    dn = 0
    while True:
        diff, terms_jumped = next_term(digits, 20, i + dn, n)
        dn += terms_jumped
        if dn == n - i:
            break

    a_n = 0
    for j in range(len(digits)):
        a_n += digits[j] * 10**j
    return a_n


if __name__ == "__main__":
    print(f"{solution() = }")
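A naive reference implementation of the same recurrence (a(n+1) = a(n) + digitsum(a(n))) is handy for sanity-checking the memoized version on small inputs; it is O(n) in the number of terms, so only practical for tiny n:

def solution_naive(n: int = 20) -> int:
    a_n = 1
    for _ in range(1, n):
        a_n += sum(int(d) for d in str(a_n))
    return a_n

print(solution(20), solution_naive(20))  # the two should agree for small n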
| 107 | '''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
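A usage sketch: shrinking a 6-layer Marian teacher to a 3-encoder/1-decoder student. The model name is only an example, and the call downloads teacher weights from the Hub, so run it only when that is acceptable:

student, e_copied, d_copied = create_student_by_copying_alternating_layers(
    "Helsinki-NLP/opus-mt-en-ro", save_path="student_en_ro_3_1", e=3, d=1
)
print(e_copied, d_copied)  # per LAYERS_TO_COPY[6]: [0, 2, 5] and [0]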
| 67 | 0 |
"""simple docstring"""
def move_tower(height, from_pole, to_pole, with_pole):
    """Recursively move `height` disks from `from_pole` to `to_pole`."""
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
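The recursion performs exactly 2**height - 1 disk moves (each level doubles the work of the level below plus one move), which a small counter confirms:

def count_moves(height: int) -> int:
    if height < 1:
        return 0
    return 2 * count_moves(height - 1) + 1

print(count_moves(3), 2**3 - 1)  # 7 7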
| 108 | '''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()

            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]

            return outputs


@require_tf
@require_keras_nlp
class GPT2TokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 67 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

BIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/bit-50": "https://huggingface.co/google/bit-50/resolve/main/config.json",
}


class BitConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "bit"
    layer_types = ["preactivation", "bottleneck"]
    supported_padding = ["SAME", "VALID"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="preactivation",
        hidden_act="relu",
        global_padding=None,
        num_groups=32,
        drop_path_rate=0.0,
        embedding_dynamic_padding=False,
        output_stride=32,
        width_factor=1,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        if global_padding is not None:
            if global_padding.upper() in self.supported_padding:
                global_padding = global_padding.upper()
            else:
                raise ValueError(f"Padding strategy {global_padding} not supported")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.global_padding = global_padding
        self.num_groups = num_groups
        self.drop_path_rate = drop_path_rate
        self.embedding_dynamic_padding = embedding_dynamic_padding
        self.output_stride = output_stride
        self.width_factor = width_factor

        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
| 109 | '''simple docstring'''
def actual_power(a: int, b: int):
    """Divide-and-conquer computation of a**b for integer a and b."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))
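Note that `actual_power` recomputes the same half-power twice, so it performs O(b) multiplications overall; binding the half-power once restores the intended O(log b) divide-and-conquer behaviour. A sketch of that variant (not part of the original file):

def fast_power(a: int, b: int) -> float:
    if b < 0:
        return 1 / fast_power(a, -b)
    if b == 0:
        return 1
    half = fast_power(a, b // 2)  # compute the half-power once and reuse it
    return half * half * (a if b % 2 else 1)

print(power(-2, -3), fast_power(-2, -3))  # both print -0.125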
| 67 | 0 |
import argparse
import os
import re

import tensorflow as tf
import torch

from transformers import BertConfig, BertModel
from transformers.utils import logging


logging.set_verbosity_info()

logger = logging.get_logger(__name__)


def load_tf2_weights_in_bert(model, tf_checkpoint_path, config):
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    layer_depth = []
    for full_name, shape in init_vars:
        # logger.info(f"Loading TF weight {name} with shape {shape}")
        name = full_name.split("/")
        if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
            logger.info(f"Skipping non-model layer {full_name}")
            continue
        if "optimizer" in full_name:
            logger.info(f"Skipping optimization layer {full_name}")
            continue
        if name[0] == "model":
            # ignore initial 'model'
            name = name[1:]
        # figure out how many levels deep the name is
        depth = 0
        for _name in name:
            if _name.startswith("layer_with_weights"):
                depth += 1
            else:
                break
        layer_depth.append(depth)
        # read data
        array = tf.train.load_variable(tf_path, full_name)
        names.append("/".join(name))
        arrays.append(array)
    logger.info(f"Read a total of {len(arrays):,} layers")

    # Sanity check
    if len(set(layer_depth)) != 1:
        raise ValueError(f"Found layer names with different depths (layer depth {list(set(layer_depth))})")
    layer_depth = list(set(layer_depth))[0]
    if layer_depth != 1:
        raise ValueError(
            "The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"
            " heads."
        )

    # convert layers
    logger.info("Converting weights...")
    for full_name, array in zip(names, arrays):
        name = full_name.split("/")
        pointer = model
        trace = []
        for i, m_name in enumerate(name):
            if m_name == ".ATTRIBUTES":
                # variable names end with .ATTRIBUTES/VARIABLE_VALUE
                break
            if m_name.startswith("layer_with_weights"):
                layer_num = int(m_name.split("-")[-1])
                if layer_num <= 2:
                    # embedding layers
                    # layer_num 0: word_embeddings
                    # layer_num 1: position_embeddings
                    # layer_num 2: token_type_embeddings
                    continue
                elif layer_num == 3:
                    # embedding LayerNorm
                    trace.extend(["embeddings", "LayerNorm"])
                    pointer = getattr(pointer, "embeddings")
                    pointer = getattr(pointer, "LayerNorm")
                elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
                    # encoder layers
                    trace.extend(["encoder", "layer", str(layer_num - 4)])
                    pointer = getattr(pointer, "encoder")
                    pointer = getattr(pointer, "layer")
                    pointer = pointer[layer_num - 4]
                elif layer_num == config.num_hidden_layers + 4:
                    # pooler layer
                    trace.extend(["pooler", "dense"])
                    pointer = getattr(pointer, "pooler")
                    pointer = getattr(pointer, "dense")
            elif m_name == "embeddings":
                trace.append("embeddings")
                pointer = getattr(pointer, "embeddings")
                if layer_num == 0:
                    trace.append("word_embeddings")
                    pointer = getattr(pointer, "word_embeddings")
                elif layer_num == 1:
                    trace.append("position_embeddings")
                    pointer = getattr(pointer, "position_embeddings")
                elif layer_num == 2:
                    trace.append("token_type_embeddings")
                    pointer = getattr(pointer, "token_type_embeddings")
                else:
                    raise ValueError(f"Unknown embedding layer with name {full_name}")
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            elif m_name == "_attention_layer":
                # self-attention layer
                trace.extend(["attention", "self"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "self")
            elif m_name == "_attention_layer_norm":
                # output attention norm
                trace.extend(["attention", "output", "LayerNorm"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_attention_output_dense":
                # output attention dense
                trace.extend(["attention", "output", "dense"])
                pointer = getattr(pointer, "attention")
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_dense":
                # output dense
                trace.extend(["output", "dense"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "dense")
            elif m_name == "_output_layer_norm":
                # output layer norm
                trace.extend(["output", "LayerNorm"])
                pointer = getattr(pointer, "output")
                pointer = getattr(pointer, "LayerNorm")
            elif m_name == "_key_dense":
                # attention key
                trace.append("key")
                pointer = getattr(pointer, "key")
            elif m_name == "_query_dense":
                # attention query
                trace.append("query")
                pointer = getattr(pointer, "query")
            elif m_name == "_value_dense":
                # attention value
                trace.append("value")
                pointer = getattr(pointer, "value")
            elif m_name == "_intermediate_dense":
                # attention intermediate dense
                trace.extend(["intermediate", "dense"])
                pointer = getattr(pointer, "intermediate")
                pointer = getattr(pointer, "dense")
            # weights & biases
            elif m_name in ["bias", "beta"]:
                trace.append("bias")
                pointer = getattr(pointer, "bias")
            elif m_name in ["kernel", "gamma"]:
                trace.append("weight")
                pointer = getattr(pointer, "weight")
            else:
                logger.warning(f"Ignored {m_name}")
        # for certain layers reshape is necessary
        trace = ".".join(trace)
        if re.match(r"(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)", trace) or re.match(
            r"(\S+)\.attention\.output\.dense\.weight", trace
        ):
            array = array.reshape(pointer.data.shape)
        if "kernel" in full_name:
            array = array.transpose()
        if pointer.shape == array.shape:
            pointer.data = torch.from_numpy(array)
        else:
            raise ValueError(
                f"Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:"
                f" {array.shape}"
            )
        logger.info(f"Successfully set variable {full_name} to PyTorch layer {trace}")
    return model


def convert_tf2_checkpoint_to_pytorch(tf_checkpoint_path, config_path, pytorch_dump_path):
    # Instantiate model
    logger.info(f"Loading model based on config from {config_path}...")
    config = BertConfig.from_json_file(config_path)
    model = BertModel(config)

    # Load weights from checkpoint
    logger.info(f"Loading weights from checkpoint {tf_checkpoint_path}...")
    load_tf2_weights_in_bert(model, tf_checkpoint_path, config)

    # Save pytorch-model
    logger.info(f"Saving PyTorch model to {pytorch_dump_path}...")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
    )
    parser.add_argument(
        "--bert_config_file",
        type=str,
        required=True,
        help="The config json file corresponding to the BERT model. This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path",
        type=str,
        required=True,
        help="Path to the output PyTorch model (must include filename).",
    )
    args = parser.parse_args()
    convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
| 110 | '''simple docstring'''
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """
    An adapter to assist with logging in multiprocess.

    `log` takes the additional keyword arguments `main_process_only` and `in_order`, which control whether a log
    is emitted on every process, only the main one, or rank by rank.
    """

    @staticmethod
    def _should_log(main_process_only):
        "Check if log should be performed"
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
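A usage sketch; it assumes the accelerate state has been initialized first (for example by constructing `Accelerator()`) and is typically run under `accelerate launch`:

from accelerate import Accelerator

accelerator = Accelerator()
logger = get_logger(__name__, log_level="INFO")
logger.info("logged once, from the main process only")
logger.info("logged by every rank, in order", main_process_only=False, in_order=True)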
| 67 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_speech_available,
    is_torch_available,
)


_import_structure = {
    "configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
    "processing_trocr": ["TrOCRProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_trocr"] = [
        "TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TrOCRForCausalLM",
        "TrOCRPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
    from .processing_trocr import TrOCRProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 155 | '''simple docstring'''
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main():
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
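A few illustrative calls against the function above:

print(is_balanced("([]{})"))  # True
print(is_balanced("([)]"))    # False: ')' closes before the inner '[' is closed
print(is_balanced("((("))     # False: unmatched openers remain on the stack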
| 67 | 0 |
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
a : Optional[int] = logging.get_logger(__name__)
@dataclass
class a :
"""simple docstring"""
    def __init__( self , load_in_8bit=False , load_in_4bit=False , llm_int8_threshold=6.0 , llm_int8_skip_modules=None , llm_int8_enable_fp32_cpu_offload=False , llm_int8_has_fp16_weight=False , bnb_4bit_compute_dtype=None , bnb_4bit_quant_type="fp4" , bnb_4bit_use_double_quant=False , **kwargs , ):
        self.load_in_8bit = load_in_8bit
        self.load_in_4bit = load_in_4bit
        self.llm_int8_threshold = llm_int8_threshold
        self.llm_int8_skip_modules = llm_int8_skip_modules
        self.llm_int8_enable_fp32_cpu_offload = llm_int8_enable_fp32_cpu_offload
        self.llm_int8_has_fp16_weight = llm_int8_has_fp16_weight
        self.bnb_4bit_quant_type = bnb_4bit_quant_type
        self.bnb_4bit_use_double_quant = bnb_4bit_use_double_quant
        if bnb_4bit_compute_dtype is None:
            self.bnb_4bit_compute_dtype = torch.float32
        elif isinstance(bnb_4bit_compute_dtype , str ):
            self.bnb_4bit_compute_dtype = getattr(torch , bnb_4bit_compute_dtype )
        elif isinstance(bnb_4bit_compute_dtype , torch.dtype ):
            self.bnb_4bit_compute_dtype = bnb_4bit_compute_dtype
        else:
            raise ValueError("""bnb_4bit_compute_dtype must be a string or a torch.dtype""" )
        self.post_init()

    def post_init( self ):
        if not isinstance(self.llm_int8_threshold , float ):
            raise ValueError("""llm_int8_threshold must be a float""" )
        if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules , list ):
            raise ValueError("""llm_int8_skip_modules must be a list of strings""" )
        if not isinstance(self.llm_int8_enable_fp32_cpu_offload , bool ):
            raise ValueError("""llm_int8_enable_fp32_cpu_offload must be a boolean""" )
        if not isinstance(self.llm_int8_has_fp16_weight , bool ):
            raise ValueError("""llm_int8_has_fp16_weight must be a boolean""" )
        if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype , torch.dtype ):
            raise ValueError("""bnb_4bit_compute_dtype must be torch.dtype""" )
        if not isinstance(self.bnb_4bit_quant_type , str ):
            raise ValueError("""bnb_4bit_quant_type must be a string""" )
        if not isinstance(self.bnb_4bit_use_double_quant , bool ):
            raise ValueError("""bnb_4bit_use_double_quant must be a boolean""" )
        if self.load_in_4bit and not version.parse(importlib.metadata.version("""bitsandbytes""" ) ) >= version.parse(
            """0.39.0""" ):
            raise ValueError(
                """4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version""" )

    def is_quantizable( self ):
        return self.load_in_8bit or self.load_in_4bit

    def quantization_method( self ):
        if self.load_in_8bit:
            return "llm_int8"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "fp4":
            return "fp4"
        elif self.load_in_4bit and self.bnb_4bit_quant_type == "nf4":
            return "nf4"
        else:
            return None

    @classmethod
    def from_dict( cls , config_dict , return_unused_kwargs , **kwargs ):
        config = cls(**config_dict )
        to_remove = []
        for key, value in kwargs.items():
            if hasattr(config , key ):
                setattr(config , key , value )
                to_remove.append(key )
        for key in to_remove:
            kwargs.pop(key , None )
        if return_unused_kwargs:
            return config, kwargs
        else:
            return config

    def to_json_file( self , json_file_path: Union[str, os.PathLike] ):
        with open(json_file_path , """w""" , encoding="""utf-8""" ) as writer:
            config_dict = self.to_dict()
            json_string = json.dumps(config_dict , indent=2 , sort_keys=True ) + """\n"""
            writer.write(json_string )

    def to_dict( self ) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__ )
        output["""bnb_4bit_compute_dtype"""] = str(output["""bnb_4bit_compute_dtype"""] ).split(""".""" )[1]
        return output

    def __repr__( self ):
        return f"""{self.__class__.__name__} {self.to_json_string()}"""

    def to_json_string( self , use_diff: bool = True ) -> str:
        if use_diff is True:
            config_dict = self.to_diff_dict()
        else:
            config_dict = self.to_dict()
        return json.dumps(config_dict , indent=2 , sort_keys=True ) + "\n"

    def to_diff_dict( self ) -> Dict[str, Any]:
        config_dict = self.to_dict()
        # get the default config dict
        default_config_dict = BitsAndBytesConfig().to_dict()
        serializable_config_dict = {}
        # only serialize values that differ from the default config
        for key, value in config_dict.items():
            if value != default_config_dict[key]:
                serializable_config_dict[key] = value
        return serializable_config_dict
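# Hedged usage sketch (added for illustration). This mirrors how such a config is
# handed to `from_pretrained`; the 4-bit path additionally needs a CUDA-enabled
# torch build and bitsandbytes>=0.39.0.
#
#   quant_config = BitsAndBytesConfig(
#       load_in_4bit=True,
#       bnb_4bit_quant_type="nf4",
#       bnb_4bit_compute_dtype="bfloat16",  # resolved via getattr(torch, ...)
#       bnb_4bit_use_double_quant=True,
#   )
#   print(quant_config.quantization_method())  # -> "nf4"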
| 114 | '''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig( XLMRobertaConfig ):
    model_type = "M-CLIP"

    def __init__( self , transformerDimSize=10_24 , imageDimSize=7_68 , **kwargs ):
        """simple docstring"""
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )


class MultilingualCLIP( PreTrainedModel ):
    config_class = MCLIPConfig

    def __init__( self , config , *args , **kwargs ):
        """simple docstring"""
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )

    def forward( self , input_ids , attention_mask ):
        """simple docstring"""
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        embs2 = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs2 ), embs
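# Hedged usage sketch (added; not in the original file). The dims are chosen to
# match xlm-roberta-base so the randomly initialised model is shape-consistent:
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("xlm-roberta-base")
#   batch = tok(["a photo of a dog"], return_tensors="pt")
#   model = MultilingualCLIP(MCLIPConfig(transformerDimSize=768, vocab_size=250002))
#   projected, token_embs = model.forward(batch["input_ids"], batch["attention_mask"])
#   print(projected.shape)  # (1, config.numDims)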
| 67 | 0 |
def solution( limit = 1000000 ) -> int:
    '''simple docstring'''
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
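# Sanity check added for illustration: phi(2..10) = 1,2,2,4,2,6,4,6,4, which sums
# to 31, so solution(10) should return 31.
assert solution(10 ) == 31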
if __name__ == "__main__":
print(F'''{solution() = }''')
| 343 | '''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 67 | 0 |
import collections
import gzip
import os
import urllib.request
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def _read32( bytestream ):
    """simple docstring"""
    dt = numpy.dtype(numpy.uint32 ).newbyteorder('>' )
    return numpy.frombuffer(bytestream.read(4 ) , dtype=dt )[0]


@deprecated(None , 'Please use tf.data to implement this functionality.' )
def _extract_images( f ):
    """simple docstring"""
    print('Extracting' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name) )
        num_images = _read32(bytestream )
        rows = _read32(bytestream )
        cols = _read32(bytestream )
        buf = bytestream.read(rows * cols * num_images )
        data = numpy.frombuffer(buf , dtype=numpy.uint8 )
        data = data.reshape(num_images , rows , cols , 1 )
        return data


@deprecated(None , 'Please use tf.one_hot on tensors.' )
def _dense_to_one_hot( labels_dense , num_classes ):
    """simple docstring"""
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels ) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes) )
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


@deprecated(None , 'Please use tf.data to implement this functionality.' )
def _extract_labels( f , one_hot=False , num_classes=10 ):
    """simple docstring"""
    print('Extracting' , f.name )
    with gzip.GzipFile(fileobj=f ) as bytestream:
        magic = _read32(bytestream )
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name) )
        num_items = _read32(bytestream )
        buf = bytestream.read(num_items )
        labels = numpy.frombuffer(buf , dtype=numpy.uint8 )
        if one_hot:
            return _dense_to_one_hot(labels , num_classes )
        return labels
class _DataSet :
    @deprecated(
        None , 'Please use alternatives such as official/mnist/_DataSet.py'
        ' from tensorflow/models.' , )
    def __init__( self , images , labels , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , seed=None , ):
        '''simple docstring'''
        seed1 , seed2 = random_seed.get_seed(seed )
        # If op level seed is not set, use whatever graph level seed is returned
        numpy.random.seed(seed1 if seed is None else seed2 )
        dtype = dtypes.as_dtype(dtype ).base_dtype
        if dtype not in (dtypes.uint8, dtypes.float32):
            raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
        if fake_data:
            self._num_examples = 10000
            self.one_hot = one_hot
        else:
            assert (
                images.shape[0] == labels.shape[0]
            ), f'images.shape: {images.shape} labels.shape: {labels.shape}'
            self._num_examples = images.shape[0]
            # Convert shape from [num examples, rows, columns, depth]
            # to [num examples, rows*columns] (assuming depth == 1)
            if reshape:
                assert images.shape[3] == 1
                images = images.reshape(
                    images.shape[0] , images.shape[1] * images.shape[2] )
            if dtype == dtypes.float32:
                # Convert from [0, 255] -> [0.0, 1.0].
                images = images.astype(numpy.float32 )
                images = numpy.multiply(images , 1.0 / 255.0 )
        self._images = images
        self._labels = labels
        self._epochs_completed = 0
        self._index_in_epoch = 0

    @property
    def images( self ):
        '''simple docstring'''
        return self._images

    @property
    def labels( self ):
        '''simple docstring'''
        return self._labels

    @property
    def num_examples( self ):
        '''simple docstring'''
        return self._num_examples

    @property
    def epochs_completed( self ):
        '''simple docstring'''
        return self._epochs_completed

    def next_batch( self , batch_size , fake_data=False , shuffle=True ):
        '''simple docstring'''
        if fake_data:
            fake_image = [1] * 784
            fake_label = [1] + [0] * 9 if self.one_hot else 0
            return (
                [fake_image for _ in range(batch_size )],
                [fake_label for _ in range(batch_size )],
            )
        start = self._index_in_epoch
        # Shuffle for the first epoch
        if self._epochs_completed == 0 and start == 0 and shuffle:
            perm0 = numpy.arange(self._num_examples )
            numpy.random.shuffle(perm0 )
            self._images = self.images[perm0]
            self._labels = self.labels[perm0]
        # Go to the next epoch
        if start + batch_size > self._num_examples:
            # Finished epoch
            self._epochs_completed += 1
            # Get the rest examples in this epoch
            rest_num_examples = self._num_examples - start
            images_rest_part = self._images[start : self._num_examples]
            labels_rest_part = self._labels[start : self._num_examples]
            # Shuffle the data
            if shuffle:
                perm = numpy.arange(self._num_examples )
                numpy.random.shuffle(perm )
                self._images = self.images[perm]
                self._labels = self.labels[perm]
            # Start next epoch
            start = 0
            self._index_in_epoch = batch_size - rest_num_examples
            end = self._index_in_epoch
            images_new_part = self._images[start:end]
            labels_new_part = self._labels[start:end]
            return (
                numpy.concatenate((images_rest_part, images_new_part) , axis=0 ),
                numpy.concatenate((labels_rest_part, labels_new_part) , axis=0 ),
            )
        else:
            self._index_in_epoch += batch_size
            end = self._index_in_epoch
            return self._images[start:end], self._labels[start:end]
@deprecated(None , 'Please write your own downloading logic.' )
def _maybe_download( filename , work_directory , source_url ):
    """simple docstring"""
    if not gfile.Exists(work_directory ):
        gfile.MakeDirs(work_directory )
    filepath = os.path.join(work_directory , filename )
    if not gfile.Exists(filepath ):
        urllib.request.urlretrieve(source_url , filepath )  # noqa: S310
        with gfile.GFile(filepath ) as f:
            size = f.size()
        print('Successfully downloaded' , filename , size , 'bytes.' )
    return filepath


@deprecated(
    None , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def read_data_sets( train_dir , fake_data=False , one_hot=False , dtype=dtypes.float32 , reshape=True , validation_size=5000 , seed=None , source_url=DEFAULT_SOURCE_URL , ):
    """simple docstring"""
    if fake_data:

        def fake():
            return _DataSet(
                [] , [] , fake_data=True , one_hot=one_hot , dtype=dtype , seed=seed )

        train = fake()
        validation = fake()
        test = fake()
        return _Datasets(train=train , validation=validation , test=test )
    if not source_url:  # empty string check
        source_url = DEFAULT_SOURCE_URL
    train_images_file = 'train-images-idx3-ubyte.gz'
    train_labels_file = 'train-labels-idx1-ubyte.gz'
    test_images_file = 't10k-images-idx3-ubyte.gz'
    test_labels_file = 't10k-labels-idx1-ubyte.gz'
    local_file = _maybe_download(
        train_images_file , train_dir , source_url + train_images_file )
    with gfile.Open(local_file , 'rb' ) as f:
        train_images = _extract_images(f )
    local_file = _maybe_download(
        train_labels_file , train_dir , source_url + train_labels_file )
    with gfile.Open(local_file , 'rb' ) as f:
        train_labels = _extract_labels(f , one_hot=one_hot )
    local_file = _maybe_download(
        test_images_file , train_dir , source_url + test_images_file )
    with gfile.Open(local_file , 'rb' ) as f:
        test_images = _extract_images(f )
    local_file = _maybe_download(
        test_labels_file , train_dir , source_url + test_labels_file )
    with gfile.Open(local_file , 'rb' ) as f:
        test_labels = _extract_labels(f , one_hot=one_hot )
    if not 0 <= validation_size <= len(train_images ):
        msg = (
            'Validation size should be between 0 and '
            f'{len(train_images )}. Received: {validation_size}.'
        )
        raise ValueError(msg )
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]
    options = {'dtype': dtype, 'reshape': reshape, 'seed': seed}
    train = _DataSet(train_images , train_labels , **options )
    validation = _DataSet(validation_images , validation_labels , **options )
    test = _DataSet(test_images , test_labels , **options )
    return _Datasets(train=train , validation=validation , test=test )
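# Hedged usage sketch (added; not in the original). The first call downloads the
# four MNIST archives (~11 MB); the directory path is a placeholder.
#
#   mnist = read_data_sets('/tmp/mnist_data', one_hot=True)
#   xs, ys = mnist.train.next_batch(32)
#   print(xs.shape, ys.shape)   # (32, 784) (32, 10)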
| 218 | '''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix( matrix ) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''' )
        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0] , swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0] , swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix ) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
                + (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
                + (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
            )
            - (
                (d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
                + (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
                + (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
            ) )
        if determinant == 0:
            raise ValueError('''This matrix has no inverse.''' )
        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
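# Worked example added for illustration: for [[4, 7], [2, 6]] the determinant is
# 4*6 - 2*7 = 10, so the inverse is [[0.6, -0.7], [-0.2, 0.4]].
assert inverse_of_matrix([[4, 7], [2, 6]] ) == [[0.6, -0.7], [-0.2, 0.4]]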
| 67 | 0 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Any = logging.get_logger(__name__)
_lowerCamelCase : Any = {
"facebook/data2vec-base-960h": "https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json",
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = "data2vec-audio"
def __init__( self : Optional[Any] , UpperCamelCase__ : int=3_2 , UpperCamelCase__ : int=7_6_8 , UpperCamelCase__ : Optional[Any]=1_2 , UpperCamelCase__ : Any=1_2 , UpperCamelCase__ : str=3_0_7_2 , UpperCamelCase__ : Tuple="gelu" , UpperCamelCase__ : List[Any]=0.1 , UpperCamelCase__ : int=0.1 , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : List[Any]=0.0 , UpperCamelCase__ : Dict=0.1 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : int=0.0_2 , UpperCamelCase__ : List[Any]=1E-5 , UpperCamelCase__ : List[str]="gelu" , UpperCamelCase__ : List[str]=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , UpperCamelCase__ : Optional[Any]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase__ : Any=(1_0, 3, 3, 3, 3, 2, 2) , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : List[Any]=1_6 , UpperCamelCase__ : Optional[Any]=1_9 , UpperCamelCase__ : Optional[int]=5 , UpperCamelCase__ : str=0.0_5 , UpperCamelCase__ : Union[str, Any]=1_0 , UpperCamelCase__ : Tuple=2 , UpperCamelCase__ : int=0.0 , UpperCamelCase__ : List[str]=1_0 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : List[Any]="sum" , UpperCamelCase__ : Dict=False , UpperCamelCase__ : List[Any]=False , UpperCamelCase__ : Dict=2_5_6 , UpperCamelCase__ : Tuple=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , UpperCamelCase__ : Any=(5, 3, 3, 1, 1) , UpperCamelCase__ : Optional[Any]=(1, 2, 3, 1, 1) , UpperCamelCase__ : Optional[int]=5_1_2 , UpperCamelCase__ : str=0 , UpperCamelCase__ : Dict=1 , UpperCamelCase__ : Optional[int]=2 , UpperCamelCase__ : Optional[int]=False , UpperCamelCase__ : List[Any]=3 , UpperCamelCase__ : int=2 , UpperCamelCase__ : Any=3 , UpperCamelCase__ : Optional[int]=None , **UpperCamelCase__ : str , ):
"""simple docstring"""
super().__init__(**UpperCamelCase__ , pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ )
UpperCamelCase = hidden_size
UpperCamelCase = feat_extract_activation
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = conv_bias
UpperCamelCase = num_conv_pos_embeddings
UpperCamelCase = num_conv_pos_embedding_groups
UpperCamelCase = conv_pos_kernel_size
UpperCamelCase = len(self.conv_dim )
UpperCamelCase = num_hidden_layers
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_dropout
UpperCamelCase = attention_dropout
UpperCamelCase = activation_dropout
UpperCamelCase = feat_proj_dropout
UpperCamelCase = final_dropout
UpperCamelCase = layerdrop
UpperCamelCase = layer_norm_eps
UpperCamelCase = initializer_range
UpperCamelCase = vocab_size
UpperCamelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase = mask_time_prob
UpperCamelCase = mask_time_length
UpperCamelCase = mask_time_min_masks
UpperCamelCase = mask_feature_prob
UpperCamelCase = mask_feature_length
UpperCamelCase = mask_feature_min_masks
# ctc loss
UpperCamelCase = ctc_loss_reduction
UpperCamelCase = ctc_zero_infinity
# adapter
UpperCamelCase = add_adapter
UpperCamelCase = adapter_kernel_size
UpperCamelCase = adapter_stride
UpperCamelCase = num_adapter_layers
UpperCamelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = list(UpperCamelCase__ )
UpperCamelCase = xvector_output_dim
@property
    def inputs_to_logits_ratio( self ):
"""simple docstring"""
return math.prod(self.conv_stride )
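# Note added for illustration: with the default conv_stride of
# (5, 2, 2, 2, 2, 2, 2) the feature extractor downsamples raw audio by
# 5*2*2*2*2*2*2 = 320, i.e. one hidden frame per 320 samples (20 ms at 16 kHz).
assert math.prod((5, 2, 2, 2, 2, 2, 2)) == 320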
| 28 | '''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field( default=None , metadata=None ):
    return field(default_factory=lambda: default , metadata=metadata )
@dataclass
class BenchmarkArguments :
    models : List[str] =list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
lowerCamelCase : List[int] =list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
lowerCamelCase : List[int] =list_field(
default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} )
    multi_process : bool =field(
default=UpperCAmelCase__ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
lowerCamelCase : str =field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
lowerCamelCase : str =field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
lowerCamelCase : str =field(
default=F'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
lowerCamelCase : str =field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
lowerCamelCase : str =field(
default=F'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
lowerCamelCase : str =field(
default=F'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
lowerCamelCase : int =field(default=3 , metadata={"help": "Times an experiment will be run."} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
    def __post_init__( self ):
        """simple docstring"""
        warnings.warn(
            f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
            ''' are deprecated in general and it is advised to use external Benchmarking libraries '''
            ''' to benchmark Transformer models.''' , FutureWarning , )
    def to_json_string( self ):
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
    def model_names( self ):
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
    def do_multi_processing( self ):
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
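# Hedged usage sketch (added): these fields are normally populated through
# HfArgumentParser; the model id is a placeholder and `batch_sizes` is assumed to
# follow the upstream BenchmarkArguments field names.
#
#   from transformers import HfArgumentParser
#   parser = HfArgumentParser(BenchmarkArguments)
#   benchmark_args = parser.parse_args_into_dataclasses(
#       args=["--models", "bert-base-uncased", "--batch_sizes", "8"]
#   )[0]
#   print(benchmark_args.model_names)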
| 67 | 0 |
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r'^(?P<major>\d+)' r'\.(?P<minor>\d+)' r'\.(?P<patch>\d+)$')


@total_ordering
@dataclass
class Version :
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__( self ):
        """simple docstring"""
        self.major , self.minor , self.patch = _str_to_version_tuple(self.version_str )

    def __repr__( self ):
        """simple docstring"""
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple( self ):
        """simple docstring"""
        return self.major, self.minor, self.patch

    def _validate_operand( self , other ):
        """simple docstring"""
        if isinstance(other , str ):
            return Version(other )
        elif isinstance(other , Version ):
            return other
        raise TypeError(f"{other} (type {type(other )}) cannot be compared to version." )

    def __eq__( self , other ):
        """simple docstring"""
        try:
            other = self._validate_operand(other )
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__( self , other ):
        """simple docstring"""
        other = self._validate_operand(other )
        return self.tuple < other.tuple

    def __hash__( self ):
        """simple docstring"""
        return hash(_version_tuple_to_str(self.tuple ) )

    @classmethod
    def from_dict( cls , dic ):
        """simple docstring"""
        field_names = {f.name for f in dataclasses.fields(cls )}
        return cls(**{k: v for k, v in dic.items() if k in field_names} )

    def _to_yaml_string( self ) -> str:
        """simple docstring"""
        return self.version_str


def _str_to_version_tuple( version_str ):
    res = _VERSION_REG.match(version_str )
    if not res:
        raise ValueError(F"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits." )
    return tuple(int(v ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )


def _version_tuple_to_str( version_tuple ):
    return ".".join(str(v ) for v in version_tuple )
| 175 | '''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCAmelCase =None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"moussaKam/mbarthez": 1_0_2_4,
"moussaKam/barthez": 1_0_2_4,
"moussaKam/barthez-orangesum-title": 1_0_2_4,
}
__UpperCAmelCase ="▁"
class BarthezTokenizerFast( PreTrainedTokenizerFast ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BarthezTokenizer
def __init__( self : Optional[Any] , a : Dict=None , a : str=None , a : List[Any]="<s>" , a : Optional[int]="</s>" , a : List[str]="</s>" , a : Tuple="<s>" , a : str="<unk>" , a : Any="<pad>" , a : Union[str, Any]="<mask>" , **a : Union[str, Any] , ):
"""simple docstring"""
__lowerCamelCase = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , )
__lowerCamelCase = vocab_file
__lowerCamelCase = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
__lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : List[int] , a : Optional[List[int]] = None ):
"""simple docstring"""
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : str , a : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase = os.path.join(
a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,)
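# Hedged usage sketch (added for illustration; requires network access the first
# time and a sentencepiece-backed checkpoint -- the id comes from the map above):
#
#   tok = BarthezTokenizerFast.from_pretrained("moussaKam/barthez")
#   enc = tok("Les transformateurs sont formidables !")
#   print(enc.input_ids)
#   print(tok.decode(enc.input_ids))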
| 67 | 0 |
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, T5FilmDecoder
__A = "base_with_context"
def load_notes_encoder( weights , model ):
"""simple docstring"""
lowerCamelCase__: Tuple =nn.Parameter(torch.FloatTensor(weights["token_embedder"]["embedding"] ) )
lowerCamelCase__: Union[str, Any] =nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=UpperCamelCase__ )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__: Optional[Any] =weights[F"""layers_{lyr_num}"""]
lowerCamelCase__: Union[str, Any] =nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowerCamelCase__: List[Any] =ly_weight["attention"]
lowerCamelCase__: Any =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase__: int =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase__: str =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase__: Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase__: List[Any] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase__: Union[str, Any] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase__: List[Any] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase__: Any =nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_continuous_encoder( weights , model ):
"""simple docstring"""
lowerCamelCase__: List[Any] =nn.Parameter(torch.FloatTensor(weights["input_proj"]["kernel"].T ) )
lowerCamelCase__: List[Any] =nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=UpperCamelCase__ )
for lyr_num, lyr in enumerate(model.encoders ):
lowerCamelCase__: Union[str, Any] =weights[F"""layers_{lyr_num}"""]
lowerCamelCase__: List[Any] =ly_weight["attention"]
lowerCamelCase__: Dict =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase__: Tuple =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase__: Dict =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase__: Any =nn.Parameter(
torch.FloatTensor(ly_weight["pre_attention_layer_norm"]["scale"] ) )
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase__: Dict =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase__: str =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase__: Tuple =nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase__: int =nn.Parameter(torch.FloatTensor(weights["encoder_norm"]["scale"] ) )
return model
def load_decoder( weights , model ):
"""simple docstring"""
lowerCamelCase__: Dict =nn.Parameter(torch.FloatTensor(weights["time_emb_dense0"]["kernel"].T ) )
lowerCamelCase__: int =nn.Parameter(torch.FloatTensor(weights["time_emb_dense1"]["kernel"].T ) )
lowerCamelCase__: Optional[int] =nn.Parameter(
torch.FloatTensor(weights["Embed_0"]["embedding"] ) , requires_grad=UpperCamelCase__ )
lowerCamelCase__: str =nn.Parameter(
torch.FloatTensor(weights["continuous_inputs_projection"]["kernel"].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
lowerCamelCase__: List[Any] =weights[F"""layers_{lyr_num}"""]
lowerCamelCase__: Union[str, Any] =nn.Parameter(
torch.FloatTensor(ly_weight["pre_self_attention_layer_norm"]["scale"] ) )
lowerCamelCase__: List[Any] =nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_0"]["DenseGeneral_0"]["kernel"].T ) )
lowerCamelCase__: int =ly_weight["self_attention"]
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase__: Union[str, Any] =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase__: Dict =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase__: int =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase__: List[Any] =ly_weight["MultiHeadDotProductAttention_0"]
lowerCamelCase__: str =nn.Parameter(torch.FloatTensor(attention_weights["query"]["kernel"].T ) )
lowerCamelCase__: int =nn.Parameter(torch.FloatTensor(attention_weights["key"]["kernel"].T ) )
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(attention_weights["value"]["kernel"].T ) )
lowerCamelCase__: List[str] =nn.Parameter(torch.FloatTensor(attention_weights["out"]["kernel"].T ) )
lowerCamelCase__: List[Any] =nn.Parameter(
torch.FloatTensor(ly_weight["pre_cross_attention_layer_norm"]["scale"] ) )
lowerCamelCase__: Any =nn.Parameter(torch.FloatTensor(ly_weight["pre_mlp_layer_norm"]["scale"] ) )
lowerCamelCase__: Any =nn.Parameter(
torch.FloatTensor(ly_weight["FiLMLayer_1"]["DenseGeneral_0"]["kernel"].T ) )
lowerCamelCase__: Tuple =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_0"]["kernel"].T ) )
lowerCamelCase__: Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wi_1"]["kernel"].T ) )
lowerCamelCase__: Tuple =nn.Parameter(torch.FloatTensor(ly_weight["mlp"]["wo"]["kernel"].T ) )
lowerCamelCase__: str =nn.Parameter(torch.FloatTensor(weights["decoder_norm"]["scale"] ) )
lowerCamelCase__: Optional[int] =nn.Parameter(torch.FloatTensor(weights["spec_out_dense"]["kernel"].T ) )
return model
def main( args ):
"""simple docstring"""
lowerCamelCase__: List[str] =checkpoints.load_tax_checkpoint(args.checkpoint_path )
lowerCamelCase__: Optional[Any] =jnp.tree_util.tree_map(onp.array , UpperCamelCase__ )
lowerCamelCase__: Union[str, Any] =[
"from __gin__ import dynamic_registration",
"from music_spectrogram_diffusion.models.diffusion import diffusion_utils",
"diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0",
"diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()",
]
lowerCamelCase__: Any =os.path.join(args.checkpoint_path , ".." , "config.gin" )
lowerCamelCase__: Optional[int] =inference.parse_training_gin_file(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__: List[str] =inference.InferenceModel(args.checkpoint_path , UpperCamelCase__ )
lowerCamelCase__: Union[str, Any] =DDPMScheduler(beta_schedule="squaredcos_cap_v2" , variance_type="fixed_large" )
lowerCamelCase__: Dict =SpectrogramNotesEncoder(
max_length=synth_model.sequence_length["inputs"] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
lowerCamelCase__: Dict =SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length["targets_context"] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj="gated-gelu" , )
    lowerCamelCase__: List[str] =T5FilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length["targets_context"] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
lowerCamelCase__: List[Any] =load_notes_encoder(ta_checkpoint["target"]["token_encoder"] , UpperCamelCase__ )
lowerCamelCase__: List[str] =load_continuous_encoder(ta_checkpoint["target"]["continuous_encoder"] , UpperCamelCase__ )
lowerCamelCase__: int =load_decoder(ta_checkpoint["target"]["decoder"] , UpperCamelCase__ )
lowerCamelCase__: Any =OnnxRuntimeModel.from_pretrained("kashif/soundstream_mel_decoder" )
lowerCamelCase__: Union[str, Any] =SpectrogramDiffusionPipeline(
notes_encoder=UpperCamelCase__ , continuous_encoder=UpperCamelCase__ , decoder=UpperCamelCase__ , scheduler=UpperCamelCase__ , melgan=UpperCamelCase__ , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--output_path", default=None, type=str, required=True, help="Path to the converted model.")
parser.add_argument(
"--save", default=True, type=bool, required=False, help="Whether to save the converted model or not."
)
parser.add_argument(
"--checkpoint_path",
default=f'{MODEL}/checkpoint_500000',
type=str,
required=False,
help="Path to the original jax model checkpoint.",
)
    args = parser.parse_args()
main(args)
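# Example invocation added for illustration; the script filename and checkpoint
# path are placeholders (the flag names come from the argparse setup above):
#
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path /path/to/base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion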
| 10 | '''simple docstring'''
def solution( limit = 1_00_00_00 ) -> int:
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'{solution() = }')
| 67 | 0 |
def is_balanced( s ) -> bool:
    """simple docstring"""
    stack = []
    open_brackets = set({'(', '[', '{'} )
    closed_brackets = set({')', ']', '}'} )
    open_to_closed = {'{': '}', '[': ']', '(': ')'}
    for i in range(len(s ) ):
        if s[i] in open_brackets:
            stack.append(s[i] )
        elif s[i] in closed_brackets and (
            len(stack ) == 0 or (len(stack ) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack ) == 0


def main() -> None:
    """simple docstring"""
    s = input('Enter sequence of brackets: ' )
    if is_balanced(s ):
        print(s , 'is balanced' )
    else:
        print(s , 'is not balanced' )
if __name__ == "__main__":
main()
| 322 | '''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester :
def __init__( self : Union[str, Any] , a : Union[str, Any] , a : Tuple=13 , a : Optional[Any]=7 , a : List[Any]=True , a : Optional[Any]=True , a : Any=True , a : Union[str, Any]=99 , a : Any=32 , a : int=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Optional[Any]="gelu" , a : Union[str, Any]=0.1 , a : Any=0.1 , a : Optional[int]=5_12 , a : int=16 , a : Optional[Any]=2 , a : Union[str, Any]=0.02 , a : Any=3 , a : Dict=4 , a : Any=None , ):
"""simple docstring"""
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = self.vocab_size - 1
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : Dict , a : List[str] , a : Tuple , a : List[Any] , *a : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTModel(config=a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , head_mask=a )
__lowerCamelCase = model(a , token_type_ids=a )
__lowerCamelCase = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Union[str, Any] , a : Dict , a : Union[str, Any] , a : Tuple , *a : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTLMHeadModel(a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Tuple , a : Optional[int] , a : Union[str, Any] , a : Optional[Any] , *a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTDoubleHeadsModel(a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : int , a : Dict , a : Optional[Any] , a : str , *a : int ):
"""simple docstring"""
__lowerCamelCase = self.num_labels
__lowerCamelCase = OpenAIGPTForSequenceClassification(a )
model.to(a )
model.eval()
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__lowerCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class a__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : List[str] =(
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCamelCase : str =(
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCamelCase : Optional[int] =(
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : Tuple , a : Optional[int] , a : int , a : str , a : Any ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : int , a : Optional[int] , a : str=False ):
"""simple docstring"""
__lowerCamelCase = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a , )
__lowerCamelCase = inputs_dict['''labels''']
__lowerCamelCase = inputs_dict['''labels''']
__lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a , )
__lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=a , n_embd=37 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = OpenAIGPTModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_torch
class a__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(a )
__lowerCamelCase = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=a ) # the president is
__lowerCamelCase = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__lowerCamelCase = model.generate(a , do_sample=a )
self.assertListEqual(output_ids[0].tolist() , a )
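# Note added for illustration: this suite is meant to be collected by pytest; the
# file path below is an assumption about where it would live in the repo, and the
# RUN_SLOW flag gates the @slow checkpoint-download tests.
#
#   RUN_SLOW=1 pytest tests/models/openai/test_modeling_openai.py -k "lm_head"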
| 67 | 0 |
'''simple docstring'''
class OverFlowError( Exception ):
    pass


class UnderFlowError( Exception ):
    pass


class FixedPriorityQueue :
    def __init__( self ):
        '''simple docstring'''
        self.queues = [
            [],
            [],
            [],
        ]
    def enqueue( self , priority : int , data : int ):
        '''simple docstring'''
        try:
            if len(self.queues[priority] ) >= 100:
                raise OverFlowError('''Maximum queue size is 100''' )
            self.queues[priority].append(data )
        except IndexError:
            raise ValueError('''Valid priorities are 0, 1, and 2''' )
    def dequeue( self ):
'''simple docstring'''
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError('''All queues are empty''' )
def __str__( self : Optional[Any] ):
'''simple docstring'''
return "\n".join(f"""Priority {i}: {q}""" for i, q in enumerate(self.queues ) )
class ElementPriorityQueue :
    def __init__( self ):
        '''simple docstring'''
        self.queue = []
    def enqueue( self , data : int ):
        '''simple docstring'''
        if len(self.queue ) == 100:
            raise OverFlowError('''Maximum queue size is 100''' )
        self.queue.append(data )
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
if not self.queue:
raise UnderFlowError('''The queue is empty''' )
else:
UpperCAmelCase__ : int = min(self.queue )
self.queue.remove(_A )
return data
def __str__( self : Optional[Any] ):
'''simple docstring'''
return str(self.queue )
def fixed_priority_queue() -> None:
    fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 1_00 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 1_28 )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
    print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue() -> None:
    epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(1_00 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(1_28 )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
    print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue()
| 181 | '''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """simple docstring"""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''' , FutureWarning , )
            feature_extractor = kwargs.pop('''feature_extractor''' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''' )
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''' )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes : Union[List[List[int]], List[List[List[int]]]] = None , word_labels : Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens : bool = True , padding : Union[bool, str, PaddingStrategy] = False , truncation : Union[bool, str, TruncationStrategy] = None , max_length : Optional[int] = None , stride : int = 0 , pad_to_multiple_of : Optional[int] = None , return_token_type_ids : Optional[bool] = None , return_attention_mask : Optional[bool] = None , return_overflowing_tokens : bool = False , return_special_tokens_mask : bool = False , return_offsets_mapping : bool = False , return_length : bool = False , verbose : bool = True , return_tensors : Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        """simple docstring"""
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                '''You cannot provide bounding boxes '''
                '''if you initialized the image processor with apply_ocr set to True.''' )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                '''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['''words'''] , text_pair=text_pair , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop('''pixel_values''' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['''overflow_to_sample_mapping'''] )
        encoded_inputs['''image'''] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        """simple docstring"""
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f""" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}""" )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        """simple docstring"""
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        """simple docstring"""
        return ["input_ids", "bbox", "attention_mask", "image"]
    @property
    def feature_extractor_class( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        """simple docstring"""
        warnings.warn(
            '''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , FutureWarning , )
        return self.image_processor
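# Hedged usage sketch: a minimal driver for the processor above; the checkpoint name
# and input image are assumptions, not part of this file.
#
#     from transformers import LayoutXLMProcessor
#     from PIL import Image
#     processor = LayoutXLMProcessor.from_pretrained("microsoft/layoutxlm-base")
#     encoding = processor(Image.open("form.png").convert("RGB"), return_tensors="pt")
#     # apply_ocr runs inside the image processor, so words and boxes come back automatically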
| 67 | 0 |
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionUpscalePipelineFastTests( OnnxPipelineTesterMixin , unittest.TestCase ):
# TODO: is there an appropriate internal test set?
lowercase : str ="ssube/stable-diffusion-x4-upscaler-onnx"
    def get_dummy_inputs( self, seed=0 ):
        """simple docstring"""
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed ) )
        generator = torch.manual_seed(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_pipeline_default_ddpm( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        # started as 128, should now be 512
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1
    def test_pipeline_pndm( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6898892, 0.59240556, 0.52499527, 0.58866215, 0.52258235, 0.52572715, 0.62414473, 0.6174387, 0.6214964] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_dpm_multistep( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.7659278, 0.76437664, 0.75579107, 0.7691116, 0.77666986, 0.7727672, 0.7758664, 0.7812226, 0.76942515] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.6974782, 0.68902093, 0.70135885, 0.7583618, 0.7804545, 0.7854912, 0.78667426, 0.78743863, 0.78070223] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler_ancestral( self ):
        """simple docstring"""
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint, provider='''CPUExecutionProvider''' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.77424496, 0.773601, 0.7645288, 0.7769598, 0.7772739, 0.7738688, 0.78187233, 0.77879584, 0.767043] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionUpscalePipelineIntegrationTests( unittest.TestCase ):
    @property
    def gpu_provider( self ):
        """simple docstring"""
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self ):
        """simple docstring"""
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
        """simple docstring"""
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        init_image = init_image.resize((128, 128) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            '''ssube/stable-diffusion-x4-upscaler-onnx''', provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type='''np''', )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.4883, 0.4947, 0.4980, 0.4975, 0.4982, 0.4980, 0.5000, 0.5006, 0.4972] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def test_inference_k_lms( self ):
        """simple docstring"""
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        init_image = init_image.resize((128, 128) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            '''ssube/stable-diffusion-x4-upscaler-onnx''', subfolder='''scheduler''' )
        pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
            '''ssube/stable-diffusion-x4-upscaler-onnx''', scheduler=lms_scheduler, provider=self.gpu_provider, sess_options=self.gpu_options, )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = torch.manual_seed(0 )
        output = pipe(
            prompt=prompt, image=init_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type='''np''', )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array(
            [0.50173753, 0.50223356, 0.502039, 0.50233036, 0.5023725, 0.5022601, 0.5018758, 0.50234085, 0.50241566] )
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 75 | '''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel( ModelMixin ):
    def __init__( self , controlnets : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
        """simple docstring"""
        super().__init__()
        self.nets = nn.ModuleList(controlnets )
    def forward( self , sample : torch.FloatTensor , timestep : Union[torch.Tensor, float, int] , encoder_hidden_states : torch.Tensor , controlnet_cond : List[torch.tensor] , conditioning_scale : List[float] , class_labels : Optional[torch.Tensor] = None , timestep_cond : Optional[torch.Tensor] = None , attention_mask : Optional[torch.Tensor] = None , cross_attention_kwargs : Optional[Dict[str, Any]] = None , guess_mode : bool = False , return_dict : bool = True , ) -> Union[ControlNetOutput, Tuple]:
        """simple docstring"""
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond , conditioning_scale , self.nets ) ):
            down_samples , mid_sample = controlnet(
                sample , timestep , encoder_hidden_states , image , scale , class_labels , timestep_cond , attention_mask , cross_attention_kwargs , guess_mode , return_dict , )
            # merge samples
            if i == 0:
                down_block_res_samples , mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples , down_samples )
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample
    def save_pretrained( self , save_directory : Union[str, os.PathLike] , is_main_process : bool = True , save_function : Callable = None , safe_serialization : bool = False , variant : Optional[str] = None , ):
        """simple docstring"""
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save , is_main_process=is_main_process , save_function=save_function , safe_serialization=safe_serialization , variant=variant , )
            idx += 1
            model_path_to_save = model_path_to_save + f"""_{idx}"""
    @classmethod
    def from_pretrained( cls , pretrained_model_path : Optional[Union[str, os.PathLike]] , **kwargs ):
        """simple docstring"""
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load ):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load , **kwargs )
            controlnets.append(controlnet )
            idx += 1
            model_path_to_load = pretrained_model_path + f"""_{idx}"""
        logger.info(f"""{len(controlnets )} controlnets loaded from {pretrained_model_path}.""" )
        if len(controlnets ) == 0:
            raise ValueError(
                f"""No ControlNets found under {os.path.dirname(pretrained_model_path )}. Expected at least {pretrained_model_path + '_0'}.""" )
        return cls(controlnets )
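# Hedged usage sketch: combining two pretrained ControlNets with the wrapper above;
# the checkpoint names are illustrative assumptions.
#
#     canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
#     pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
#     multi = MultiControlNetModel([canny, pose])
#     # at call time, `conditioning_scale` is a list with one weight per net; the
#     # per-net residuals are summed after scaling, as in `forward` above.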
| 67 | 0 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
logger = logging.get_logger(__name__)
def copy_layers( src_layers , dest_layers , layers_to_copy ):
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
    assert len(dest_layers ) == len(layers_to_copy ), f"""{len(dest_layers )} != {len(layers_to_copy )}"""
    dest_layers.load_state_dict(layers_to_copy.state_dict() )
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
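# Illustrative lookups, read straight from the tables above: distilling a 12-layer
# teacher into a 3-layer student copies teacher layers [0, 6, 11], and intermediate
# supervision for the same pair matches against teacher layers [3, 7, 11].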
def pick_layers_to_copy( n_student , n_teacher ):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
                f""" {n_student}""" )
        return list(range(n_student ) )
def get_layers_to_supervise( n_student , n_teacher ):
    if n_student > n_teacher:
        raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
    elif n_teacher == n_student:
        return list(range(n_teacher ) )
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def A ( _lowercase , _lowercase = "student" , _lowercase = None , _lowercase = None , _lowercase=False , _lowercase=None , _lowercase=None , **_lowercase , ):
SCREAMING_SNAKE_CASE : Tuple = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
AutoTokenizer.from_pretrained(UpperCamelCase__ ).save_pretrained(UpperCamelCase__ ) # purely for convenience
SCREAMING_SNAKE_CASE : List[str] = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ ).eval()
else:
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), f"""teacher must be a model or string got type {type(UpperCamelCase__ )}"""
SCREAMING_SNAKE_CASE : Optional[Any] = teacher.config.to_diff_dict()
try:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Tuple = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = teacher_e
if d is None:
SCREAMING_SNAKE_CASE : Any = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Dict = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
SCREAMING_SNAKE_CASE : Optional[Any] = teacher_e
if d is None:
SCREAMING_SNAKE_CASE : Tuple = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(UpperCamelCase__ )
# Copy weights
SCREAMING_SNAKE_CASE : str = teacher.config_class(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : str = AutoModelForSeqaSeqLM.from_config(UpperCamelCase__ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
SCREAMING_SNAKE_CASE : str = student.load_state_dict(teacher.state_dict() , strict=UpperCamelCase__ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : int = list(range(UpperCamelCase__ ) ), list(range(UpperCamelCase__ ) )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
f""" {save_path}""" )
student.save_pretrained(UpperCamelCase__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
SCREAMING_SNAKE_CASE : Optional[Any] = pick_layers_to_copy(UpperCamelCase__ , UpperCamelCase__ )
if d_layers_to_copy is None:
SCREAMING_SNAKE_CASE : Union[str, Any] = pick_layers_to_copy(UpperCamelCase__ , UpperCamelCase__ )
try:
if hasattr(
UpperCamelCase__ , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , UpperCamelCase__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , UpperCamelCase__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , UpperCamelCase__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , UpperCamelCase__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , UpperCamelCase__ )
copy_layers(teacher.decoder.block , student.decoder.block , UpperCamelCase__ )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
SCREAMING_SNAKE_CASE : int = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(UpperCamelCase__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
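# Hedged CLI sketch: the script file name and flag spelling are assumptions; fire
# exposes the parameters of create_student_by_copying_alternating_layers directly.
#
#     python make_student.py facebook/bart-large-cnn ./student --e 6 --d 3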
| 182 | '''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class DepthEstimationPipeline( Pipeline ):
    def __init__( self , *args , **kwargs ):
        """simple docstring"""
        super().__init__(*args , **kwargs )
        requires_backends(self , '''vision''' )
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING )
    def __call__( self , images : Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        """simple docstring"""
        return super().__call__(images , **kwargs )
    def _sanitize_parameters( self , **kwargs ):
        """simple docstring"""
        return {}, {}, {}
    def preprocess( self , image ):
        """simple docstring"""
        image = load_image(image )
        self.image_size = image.size
        model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        return model_inputs
    def _forward( self , model_inputs ):
        """simple docstring"""
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs ):
        """simple docstring"""
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='''bicubic''' , align_corners=False )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 2_55 / np.max(output )).astype('''uint8''' )
        depth = Image.fromarray(formatted )
        output_dict = {}
        output_dict['''predicted_depth'''] = predicted_depth
        output_dict['''depth'''] = depth
        return output_dict
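# Hedged usage sketch: "depth-estimation" is the pipeline tag this class serves; the
# checkpoint below is an illustrative choice, not mandated by this file.
#
#     from transformers import pipeline
#     depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#     result = depth_estimator("street.jpg")
#     result["depth"].save("street_depth.png")  # PIL image built in postprocess above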
| 67 | 0 |
"""simple docstring"""
def hubble_parameter( hubble_constant: float , radiation_density: float , matter_density: float , dark_energy: float , redshift: float , ) -> float:
    '''simple docstring'''
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters ):
        raise ValueError("""All input parameters must be positive""" )
    if any(p > 1 for p in parameters[1:4] ):
        raise ValueError("""Relative densities cannot be greater than one""" )
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_a = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_a ** (1 / 2)
        return hubble
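# The expression above is the standard Friedmann expansion function
#     E(z) = omega_r (1 + z)^4 + omega_m (1 + z)^3 + omega_k (1 + z)^2 + omega_lambda,
# with H(z) = H_0 * sqrt(E(z)); omega_k = 1 - (omega_m + omega_r + omega_lambda) is the
# curvature density computed just before it.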
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 155 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__UpperCAmelCase =["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
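# Note on the pattern above: _LazyModule defers the heavy torch-backed imports until a
# symbol is first accessed, keeping `import transformers` cheap; the TYPE_CHECKING
# branch mirrors _import_structure only so static type checkers can see the real symbols.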
| 67 | 0 |
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def remove_ignore_keys_( state_dict ):
    ignore_keys = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""_float_tensor""",
"""decoder.output_projection.weight""",
]
for k in ignore_keys:
        state_dict.pop(k , None )
def make_linear_from_emb( emb ):
    vocab_size , emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size , emb_size , bias=False )
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def convert_fairseq_mbart_checkpoint_from_disk( checkpoint_path , hf_config_path="facebook/mbart-large-en-ro" , finetuned=False , mbart_50=False ):
    state_dict = torch.load(checkpoint_path , map_location="""cpu""" )["""model"""]
    remove_ignore_keys_(state_dict )
    vocab_size = state_dict["""encoder.embed_tokens.weight"""].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path , vocab_size=vocab_size )
    if mbart_50 and finetuned:
        mbart_config.activation_function = """relu"""
    state_dict["""shared.weight"""] = state_dict["""decoder.embed_tokens.weight"""]
    model = MBartForConditionalGeneration(mbart_config )
    model.model.load_state_dict(state_dict )
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared )
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem."
)
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--hf_config",
default="facebook/mbart-large-cc25",
type=str,
help="Which huggingface architecture to use: mbart-large",
)
parser.add_argument("--mbart_50", action="store_true", help="whether the model is mMART-50 checkpoint")
parser.add_argument("--finetuned", action="store_true", help="whether the model is a fine-tuned checkpoint")
    args = parser.parse_args()
    model = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_50=args.mbart_50
    )
model.save_pretrained(args.pytorch_dump_folder_path)
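# Hedged CLI sketch (the script file name and paths are placeholders):
#
#     python convert_mbart_checkpoint.py model.pt ./mbart-hf \
#         --hf_config facebook/mbart-large-cc25 --finetuned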
| 114 | '''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = '''ylacombe/bark-small'''
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = '''en_speaker_1'''
__lowerCamelCase = '''This is a test string'''
__lowerCamelCase = '''speaker_embeddings_path.json'''
__lowerCamelCase = '''speaker_embeddings'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **a : Dict ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = BarkProcessor(tokenizer=a )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowerCamelCase = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__lowerCamelCase = 35
__lowerCamelCase = 2
__lowerCamelCase = 8
__lowerCamelCase = {
'''semantic_prompt''': np.ones(a ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__lowerCamelCase = processor(text=self.input_string , voice_preset=a )
__lowerCamelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__lowerCamelCase = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(a , **a )
__lowerCamelCase = processor(text=self.input_string , voice_preset=a )
__lowerCamelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__lowerCamelCase = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = BarkProcessor(tokenizer=a )
__lowerCamelCase = processor(text=self.input_string )
__lowerCamelCase = tokenizer(
self.input_string , padding='''max_length''' , max_length=2_56 , add_special_tokens=a , return_attention_mask=a , return_token_type_ids=a , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 67 | 0 |
import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
logger = logging.get_logger(__name__)
def run_with_tf_optimizations( do_eager_mode , use_xla ):
    '''simple docstring'''
    def run_func(func ):
        @wraps(func )
        def run_in_eager_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        @wraps(func )
        @tf.function(experimental_compile=use_xla )
        def run_in_graph_mode(*args , **kwargs ):
            return func(*args , **kwargs )
        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    """Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`.""" )
            return run_in_eager_mode
        else:
            return run_in_graph_mode
    return run_func
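# Hedged usage sketch: wrapping a forward pass so the benchmark can flip between eager
# execution and an (optionally XLA-compiled) tf.function; `model` and `input_ids` are
# assumed to exist in the caller's scope.
#
#     @run_with_tf_optimizations(do_eager_mode=False, use_xla=True)
#     def forward():
#         return model(input_ids, training=False)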
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> ["tf.Tensor"]:
'''simple docstring'''
UpperCamelCase = random.Random()
UpperCamelCase = [rng.randint(0 , vocab_size - 1 ) for i in range(batch_size * sequence_length )]
return tf.constant(UpperCamelCase__ , shape=(batch_size, sequence_length) , dtype=tf.intaa )
class TensorFlowBenchmark( Benchmark ):
    args: TensorFlowBenchmarkArguments
    configs: PretrainedConfig
    framework: str = "TensorFlow"
    @property
    def framework_version( self ):
        """simple docstring"""
        return tf.__version__
    def _inference_speed( self , model_name: str , batch_size: int , sequence_length: int ) -> float:
        """simple docstring"""
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
        return self._measure_speed(_inference )
    def _train_speed( self , model_name: str , batch_size: int , sequence_length: int ) -> float:
        """simple docstring"""
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _train = self._prepare_train_func(model_name , batch_size , sequence_length )
        return self._measure_speed(_train )
    def _inference_memory( self , model_name: str , batch_size: int , sequence_length: int ):
        """simple docstring"""
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _inference = self._prepare_inference_func(model_name , batch_size , sequence_length )
        return self._measure_memory(_inference )
    def _train_memory( self , model_name: str , batch_size: int , sequence_length: int ):
        """simple docstring"""
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , True )
        strategy = self.args.strategy
        if strategy is None:
            raise ValueError("""A device strategy has to be initialized before using TensorFlow.""" )
        _train = self._prepare_train_func(model_name , batch_size , sequence_length )
        return self._measure_memory(_train )
    def _prepare_inference_func( self , model_name: str , batch_size: int , sequence_length: int ) -> Callable[[], None]:
        """simple docstring"""
        config = self.config_dict[model_name]
        if self.args.fp16:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )
        has_model_class_in_config = (
            hasattr(config , """architectures""" )
            and isinstance(config.architectures , list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = """TF""" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("""transformers""" , fromlist=[model_class] )
                model_cls = getattr(transformers_module , model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
        else:
            model = TF_MODEL_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config , """vocab_size""" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_forward():
            return model(input_ids , decoder_input_ids=input_ids , training=False )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_forward():
            return model(input_ids , training=False )
        _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference
    def _prepare_train_func( self , model_name: str , batch_size: int , sequence_length: int ) -> Callable[[], None]:
        """simple docstring"""
        config = self.config_dict[model_name]
        if self.args.eager_mode is not False:
            raise ValueError("""Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`.""" )
        if self.args.fp16:
            raise NotImplementedError("""Mixed precision is currently not supported.""" )
        has_model_class_in_config = (
            hasattr(config , """architectures""" )
            and isinstance(config.architectures , list )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                model_class = """TF""" + config.architectures[0]  # prepend 'TF' for tensorflow model
                transformers_module = __import__("""transformers""" , fromlist=[model_class] )
                model_cls = getattr(transformers_module , model_class )
                model = model_cls(config )
            except ImportError:
                raise ImportError(
                    f"""{model_class} does not exist. If you just want to test the pretrained model, you might want to"""
                    """ set `--only_pretrain_model` or `args.only_pretrain_model=True`.""" )
        else:
            model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config )
        # encoder-decoder has vocab size saved differently
        vocab_size = config.vocab_size if hasattr(config , """vocab_size""" ) else config.encoder.vocab_size
        input_ids = random_input_ids(batch_size , sequence_length , vocab_size )
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_train():
            loss = model(input_ids , decoder_input_ids=input_ids , labels=input_ids , training=True )[0]
            gradients = tf.gradients(loss , model.trainable_variables )
            return gradients
        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_train():
            loss = model(input_ids , labels=input_ids , training=True )[0]
            gradients = tf.gradients(loss , model.trainable_variables )
            return gradients
        _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train
    def _measure_speed( self , func ) -> float:
        """simple docstring"""
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run 5 additional times to stabilize compilation for tpu
                    logger.info("""Do inference on TPU. Running model 5 times to stabilize compilation""" )
                    timeit.repeat(func , repeat=1 , number=5 )
                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                runtimes = timeit.repeat(
                    func , repeat=self.args.repeat , number=10 , )
                return min(runtimes ) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f"""Doesn't fit on GPU. {e}""" )
    def _measure_memory( self , func: Callable[[], None] ):
        """simple docstring"""
        logger.info(
            """Note that TensorFlow allocates more memory than """
            """it might need to speed up computation. """
            """The memory reported here corresponds to the memory """
            """reported by `nvidia-smi`, which can vary depending """
            """on total available memory on the GPU that is used.""" )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            """`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"""
                            """ consumption line by line.""" )
                    trace = start_memory_tracing("""transformers""" )
                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        """Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"""
                        """ with `args.memory=False`""" )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            """py3nvml not installed, we won't log GPU memory usage. """
                            """Install py3nvml (pip install py3nvml) to log information about GPU.""" )
                        memory = """N/A"""
                    else:
                        logger.info(
                            """Measuring total GPU usage on GPU device. Make sure to not have additional processes"""
                            """ running on the same GPU.""" )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        meminfo = nvml.nvmlDeviceGetMemoryInfo(handle )
                        max_bytes_in_use = meminfo.used
                        memory = Memory(max_bytes_in_use )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            """When enabling line by line tracing, the max peak memory for CPU is inaccurate in"""
                            """ TensorFlow.""" )
                        memory = None
                    else:
                        memory_bytes = measure_peak_memory_cpu(func )
                        memory = Memory(memory_bytes ) if isinstance(memory_bytes , int ) else memory_bytes
                if self.args.trace_memory_line_by_line:
                    summary = stop_memory_tracing(trace )
                    if memory is None:
                        memory = summary.total
                else:
                    summary = None
                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f"""Doesn't fit on GPU. {e}""" )
                return "N/A", None
| 343 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase ={"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 0 |
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_attention_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_choices=4 , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
        return config, input_ids, token_type_ids, attention_mask
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict
    def prepare_config_and_inputs_for_decoder( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest( FlaxModelTesterMixin , unittest.TestCase ):
    test_head_masking = True
    all_model_classes = (
(
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=True )
            outputs = model(np.ones((1, 1) ) )
            self.assertIsNotNone(outputs )
@require_flax
class FlaxRobertaPreLayerNormModelIntegrationTest( unittest.TestCase ):
@slow
    def test_inference_masked_lm( self ):
        '''simple docstring'''
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=True )
        input_ids = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        expected_shape = [1, 11, 5_0265]
        self.assertEqual(list(output.shape ) , expected_shape )
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.4880, 18.0199, -5.2367], [-1.8877, -4.0885, 10.7085], [-2.2613, -5.6110, 7.2665]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
@slow
    def test_inference_no_head( self ):
        '''simple docstring'''
        model = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40' , from_pt=True )
        input_ids = np.array([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] , dtype=jnp.int32 )
        output = model(input_ids )[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.0208, -0.0356, 0.0237], [-0.1569, -0.0411, -0.2626], [0.1879, 0.0125, -0.0089]]] , dtype=np.float32 )
        self.assertTrue(np.allclose(output[:, :3, :3] , expected_slice , atol=1e-4 ) )
| 218 | '''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__UpperCAmelCase =True
except (ImportError, ModuleNotFoundError):
__UpperCAmelCase =False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence( x: str ) -> str:
    # reassignment added so the substitution actually takes effect
    x = re.sub('''<n>''' , '''''' , x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
| 67 | 0 |
'''simple docstring'''
import tensorflow as tf
from ...tf_utils import shape_list
class TFAdaptiveSoftmaxMask( tf.keras.layers.Layer ):
"""simple docstring"""
    def __init__( self , vocab_size , d_embed , d_proj , cutoffs , div_val=1 , keep_order=False , **kwargs ):
        """simple docstring"""
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.d_embed = d_embed
        self.d_proj = d_proj
        self.cutoffs = cutoffs + [vocab_size]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val
        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs ) - 1
        self.head_size = self.shortlist_size + self.n_clusters
        self.keep_order = keep_order
        self.out_layers = []
        self.out_projs = []
    def build( self , input_shape ):
        """simple docstring"""
        if self.n_clusters > 0:
            self.cluster_weight = self.add_weight(
                shape=(self.n_clusters, self.d_embed) , initializer='zeros' , trainable=True , name='cluster_weight' )
            self.cluster_bias = self.add_weight(
                shape=(self.n_clusters,) , initializer='zeros' , trainable=True , name='cluster_bias' )
        if self.div_val == 1:
            for i in range(len(self.cutoffs ) ):
                if self.d_proj != self.d_embed:
                    weight = self.add_weight(
                        shape=(self.d_embed, self.d_proj) , initializer='zeros' , trainable=True , name=f"""out_projs_._{i}""" , )
                    self.out_projs.append(weight )
                else:
                    self.out_projs.append(None )
                weight = self.add_weight(
                    shape=(self.vocab_size, self.d_embed) , initializer='zeros' , trainable=True , name=f"""out_layers_._{i}_._weight""" , )
                bias = self.add_weight(
                    shape=(self.vocab_size,) , initializer='zeros' , trainable=True , name=f"""out_layers_._{i}_._bias""" , )
                self.out_layers.append((weight, bias) )
        else:
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = self.d_embed // (self.div_val**i)
                proj = self.add_weight(
                    shape=(d_emb_i, self.d_proj) , initializer='zeros' , trainable=True , name=f"""out_projs_._{i}""" )
                self.out_projs.append(proj )
                weight = self.add_weight(
                    shape=(r_idx - l_idx, d_emb_i) , initializer='zeros' , trainable=True , name=f"""out_layers_._{i}_._weight""" , )
                bias = self.add_weight(
                    shape=(r_idx - l_idx,) , initializer='zeros' , trainable=True , name=f"""out_layers_._{i}_._bias""" , )
                self.out_layers.append((weight, bias) )
        super().build(input_shape )
    @staticmethod
    def _logit( x , W , b , proj=None ):
        """simple docstring"""
        y = x
        if proj is not None:
            y = tf.einsum('ibd,ed->ibe' , y , proj )
        return tf.einsum('ibd,nd->ibn' , y , W ) + b
    @staticmethod
    def _gather_logprob( logprob , target ):
        """simple docstring"""
        lp_size = shape_list(logprob )
        r = tf.range(lp_size[0] , dtype=target.dtype )
        idx = tf.stack([r, target] , 1 )
        return tf.gather_nd(logprob , idx )
    def call( self , hidden , target , return_mean=True , training=False ):
        """simple docstring"""
        head_logprob = 0
        if self.n_clusters == 0:
            output = self._logit(hidden , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
            if target is not None:
                loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=target , logits=output )
            out = tf.nn.log_softmax(output , axis=-1 )
        else:
            hidden_sizes = shape_list(target )
            out = []
            loss = tf.zeros(hidden_sizes[:2] )
            for i in range(len(self.cutoffs ) ):
                l_idx , r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                if target is not None:
                    mask = (target >= l_idx) & (target < r_idx)
                    mask_idx = tf.where(mask )
                    cur_target = tf.boolean_mask(target , mask ) - l_idx
                if self.div_val == 1:
                    cur_W = self.out_layers[0][0][l_idx:r_idx]
                    cur_b = self.out_layers[0][1][l_idx:r_idx]
                else:
                    cur_W = self.out_layers[i][0]
                    cur_b = self.out_layers[i][1]
                if i == 0:
                    cur_W = tf.concat([cur_W, self.cluster_weight] , 0 )
                    cur_b = tf.concat([cur_b, self.cluster_bias] , 0 )
                    head_logit = self._logit(hidden , cur_W , cur_b , self.out_projs[0] )
                    head_logprob = tf.nn.log_softmax(head_logit )
                    out.append(head_logprob[..., : self.cutoffs[0]] )
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob , mask )
                        cur_logprob = self._gather_logprob(cur_head_logprob , cur_target )
                else:
                    tail_logit = self._logit(hidden , cur_W , cur_b , self.out_projs[i] )
                    tail_logprob = tf.nn.log_softmax(tail_logit )
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    logprob_i = head_logprob[..., cluster_prob_idx, None] + tail_logprob
                    out.append(logprob_i )
                    if target is not None:
                        cur_head_logprob = tf.boolean_mask(head_logprob , mask )
                        cur_tail_logprob = tf.boolean_mask(tail_logprob , mask )
                        cur_logprob = self._gather_logprob(cur_tail_logprob , cur_target )
                        cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
                if target is not None:
                    loss += tf.scatter_nd(mask_idx , -cur_logprob , shape_list(loss ) )
            out = tf.concat(out , axis=-1 )
        if target is not None:
            if return_mean:
                loss = tf.reduce_mean(loss )
            # Add the training-time loss value to the layer using `self.add_loss()`.
            self.add_loss(loss )
            # Log the loss as a metric (we could log arbitrary metrics,
            # including different metrics for training and inference.
            self.add_metric(loss , name=self.name , aggregation='mean' if return_mean else '' )
        return out
| 28 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}


class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
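
# Illustrative usage (not part of the original module): building the config with its
# defaults and one override. A sketch only; it assumes the class above is importable.
# config = GPTNeoXJapaneseConfig(hidden_dropout=0.1)
# assert config.vocab_size == 32000 and config.hidden_dropout == 0.1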
| 67 | 0 |
def odd_even_transposition(arr: list) -> list:
    """Sort a list in place with odd-even transposition (brick) sort."""
    arr_size = len(arr)
    for _ in range(arr_size):
        for i in range(_ % 2, arr_size - 1, 2):
            if arr[i + 1] < arr[i]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
    return arr


if __name__ == "__main__":
    arr = list(range(10, 0, -1))
    print(f"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
| 175 | '''simple docstring'''
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations."""
    while second != 0:
        # Carry bits, then sum-without-carry, then shift the carry left.
        c = first & second
        first ^= second
        second = c << 1
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f"{add(first, second) = }")
| 67 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict

from ..features import ClassLabel, Features, Image
from .base import TaskTemplate


@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so write through __dict__ instead of attribute assignment.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
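
# Illustrative usage (not part of the original module): aligning the template with a
# concrete dataset schema. A sketch only; it assumes the `datasets` feature types
# imported above.
# features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
# task = ImageClassification().align_with_features(features)
# print(task.column_mapping)  # {'image': 'image', 'labels': 'labels'}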
| 10 | '''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 0 |
from dataclasses import dataclass
from typing import Dict, Optional, Union

import torch
import torch.nn.functional as F
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin


@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
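
# Illustrative usage (not part of the original module): one forward pass with random
# tensors at the default sizes, where the token count (77 text tokens + proj + time
# + hidden + prd = 81) matches num_embeddings + additional_embeddings. A sketch only.
# model = PriorTransformer()
# out = model(
#     hidden_states=torch.randn(1, 768),             # noisy CLIP image embedding
#     timestep=1,
#     proj_embedding=torch.randn(1, 768),            # CLIP text embedding
#     encoder_hidden_states=torch.randn(1, 77, 768), # CLIP text hidden states
# )
# print(out.predicted_image_embedding.shape)         # torch.Size([1, 768])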
| 322 | '''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union

import fire
from torch import nn

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging


logger = logging.get_logger(__name__)


def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"{len(dest_layers)} != {len(layers_to_copy)}"
    dest_layers.load_state_dict(layers_to_copy.state_dict())


LAYERS_TO_COPY = {
    # maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
    # 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
    12: {
        1: [0],  # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
        2: [0, 6],
        3: [0, 6, 11],
        4: [0, 4, 8, 11],
        6: [0, 2, 4, 7, 9, 11],
        9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
        12: list(range(12)),
    },
    16: {  # maps num layers in student -> which teacher layers to copy
        1: [0],
        2: [0, 15],
        3: [0, 8, 15],
        4: [0, 5, 10, 15],
        6: [0, 3, 6, 9, 12, 15],
        8: [0, 2, 4, 6, 8, 10, 12, 15],
        9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
        12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
        16: list(range(16)),
    },
    6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
    # maps num layers in student -> which teacher layers to copy.
    6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
    12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
    16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}


def pick_layers_to_copy(n_student, n_teacher):
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"
                f" {n_student}"
            )
        return list(range(n_student))


def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]


def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()

    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})

    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher keys.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility

    return student, e_layers_to_copy, d_layers_to_copy


if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
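
# Illustrative invocation (not part of the original script); the script file name,
# checkpoint, and layer counts below are examples only:
#   python make_student.py facebook/bart-large-cnn student_dir --e 6 --d 3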
| 67 | 0 |
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional

import numpy as np
import pyarrow as pa

from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter


if TYPE_CHECKING:
    import jax
    import jaxlib

logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None


class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs

    @staticmethod
    def _map_devices_to_str():
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column

    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})

    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
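
# Illustrative usage (not part of the original module), assuming the public
# `datasets` API that routes to this formatter:
# from datasets import Dataset
# ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]})
# ds = ds.with_format("jax")
# print(type(ds[0]["x"]))  # a jax.Array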
| 181 | '''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory

from transformers import AutoConfig, TFGPT2LMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpt2.tokenization_gpt2 import GPT2Tokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow


if is_tf_available():
    import tensorflow as tf

if is_keras_nlp_available():
    from transformers.models.gpt2 import TFGPT2Tokenizer


TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"

if is_tf_available():

    class ModelToSave(tf.Module):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.model = TFGPT2LMHeadModel.from_config(config)

        @tf.function(input_signature=(tf.TensorSpec((None,), tf.string, name="text"),))
        def serving(self, text):
            tokenized = self.tokenizer(text)
            input_ids_dense = tokenized["input_ids"].to_tensor()

            input_mask = tf.cast(input_ids_dense > 0, tf.int32)
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])

            outputs = self.model(input_ids=input_ids_dense, attention_mask=input_mask)["logits"]
            return outputs


@require_tf
@require_keras_nlp
class GPTTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [GPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPT2Tokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif  those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))

    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs], return_tensors="tf")
                tf_outputs = tf_tokenizer([test_inputs])

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()
                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values, tf.int64) == tf_outputs_values))

    @slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))

    @slow
    def test_saved_model(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = model.serving(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                tf.saved_model.save(model, save_path, signatures={"serving_default": model.serving})
                loaded_model = tf.saved_model.load(save_path)
                loaded_output = loaded_model.signatures["serving_default"](test_inputs)["output_0"]
                # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
                self.assertTrue(tf.reduce_all(out == loaded_output))

    @slow
    def test_from_config(self):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
            out = tf_tokenizer(test_inputs)  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPT2Tokenizer.from_config(config)
            from_config_output = model_from_config(test_inputs)

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key]))

    @slow
    def test_padding(self):
        for tf_tokenizer in self.tf_tokenizers:
            # for the test to run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]])
                out = tf_tokenizer(test_inputs, max_length=max_length)

                out_length = out["input_ids"].numpy().shape[1]

                assert out_length == max_length
| 67 | 0 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_time_series_transformer": [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TimeSeriesTransformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_time_series_transformer"] = [
        "TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TimeSeriesTransformerForPrediction",
        "TimeSeriesTransformerModel",
        "TimeSeriesTransformerPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_time_series_transformer import (
        TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        TimeSeriesTransformerConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_time_series_transformer import (
            TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            TimeSeriesTransformerForPrediction,
            TimeSeriesTransformerModel,
            TimeSeriesTransformerPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 75 | '''simple docstring'''
def actual_power(a: int, b: int) -> int:
    """Compute a**b for b >= 0 by repeated squaring."""
    if b == 0:
        return 1
    half = actual_power(a, b // 2)
    if (b % 2) == 0:
        return half * half
    return a * half * half


def power(a: int, b: int) -> float:
    """Compute a**b for any integer exponent, handling negative b."""
    if b < 0:
        # Negate the exponent before recursing so actual_power only sees b >= 0.
        return 1 / actual_power(a, -b)
    return actual_power(a, b)


if __name__ == "__main__":
    print(power(-2, -3))  # -0.125
| 67 | 0 |
import unittest

from transformers import (
    MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
    pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available

from .test_pipelines_common import ANY


if is_torch_available():
    import torch


@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)

    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 182 | '''simple docstring'''
import logging
import os

from .state import PartialState


class MultiProcessAdapter(logging.LoggerAdapter):
    """A logging adapter that restricts log records to the relevant processes."""

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)

            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
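
# Illustrative usage (not part of the original module), assuming the public
# accelerate API that wraps this adapter:
# from accelerate import Accelerator
# from accelerate.logging import get_logger
# logger = get_logger(__name__)
# accelerator = Accelerator()  # initializes the shared state the adapter relies on
# logger.info("Printed once, on the main process", main_process_only=True)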
| 67 | 0 |
"""simple docstring"""
import numpy as np
def lowercase (snake_case__ : Union[str, Any] , snake_case__ : Tuple ) -> np.ndarray:
'''simple docstring'''
return np.where(vector > 0 , UpperCamelCase__ , (alpha * (np.exp(UpperCamelCase__ ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
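    # Illustrative check (not part of the original sample): positives pass through,
    # negatives are squashed toward -alpha.
    print(exponential_linear_unit(np.array([2.0, -3.0]), alpha=1.0))  # [ 2.         -0.95021293]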
| 155 | '''simple docstring'''
def is_balanced(s: str) -> bool:
    """Return True if every bracket in `s` is closed in the correct order."""
    stack = []
    open_brackets = set({"(", "[", "{"})
    closed_brackets = set({")", "]", "}"})
    open_to_closed = {"{": "}", "[": "]", "(": ")"}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")


if __name__ == "__main__":
    main()
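    # Illustrative check (not part of the original sample):
    assert is_balanced("{[()]}") and not is_balanced("{[(])}")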
| 67 | 0 |
def mf_knapsack(i: int, wt: list, val: list, j: int):
    """Memoized 0/1 knapsack: best value using the first i items at capacity j."""
    global f  # a global dp table for knapsack
    if f[i][j] < 0:
        if j < wt[i - 1]:
            val_ = mf_knapsack(i - 1, wt, val, j)
        else:
            val_ = max(
                mf_knapsack(i - 1, wt, val, j),
                mf_knapsack(i - 1, wt, val, j - wt[i - 1]) + val[i - 1],
            )
        f[i][j] = val_
    return f[i][j]


def knapsack(w: int, wt: list, val: list, n: int):
    dp = [[0] * (w + 1) for _ in range(n + 1)]

    for i in range(1, n + 1):
        for w_ in range(1, w + 1):
            if wt[i - 1] <= w_:
                dp[i][w_] = max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]], dp[i - 1][w_])
            else:
                dp[i][w_] = dp[i - 1][w_]

    return dp[n][w], dp


def knapsack_with_example_solution(w: int, wt: list, val: list):
    if not (isinstance(wt, (list, tuple)) and isinstance(val, (list, tuple))):
        raise ValueError("Both the weights and values vectors must be either lists or tuples")

    num_items = len(wt)
    if num_items != len(val):
        msg = (
            "The number of weights must be the same as the number of values.\n"
            f"But got {num_items} weights and {len(val)} values"
        )
        raise ValueError(msg)
    for i in range(num_items):
        if not isinstance(wt[i], int):
            msg = (
                "All weights must be integers but got weight of "
                f"type {type(wt[i])} at index {i}"
            )
            raise TypeError(msg)

    optimal_val, dp_table = knapsack(w, wt, val, num_items)
    example_optional_set: set = set()
    _construct_solution(dp_table, wt, num_items, w, example_optional_set)

    return optimal_val, example_optional_set


def _construct_solution(dp: list, wt: list, i: int, j: int, optimal_set: set):
    # for the current item i at a maximum weight j to be part of an optimal subset,
    # the optimal value at (i, j) must be greater than the optimal value at (i-1, j).
    # where i - 1 means considering only the previous items at the given maximum weight
    if i > 0 and j > 0:
        if dp[i - 1][j] == dp[i][j]:
            _construct_solution(dp, wt, i - 1, j, optimal_set)
        else:
            optimal_set.add(i)
            _construct_solution(dp, wt, i - 1, j - wt[i - 1], optimal_set)


if __name__ == "__main__":
    val = [3, 2, 4, 4]
    wt = [4, 3, 2, 3]
    n = 4
    w = 6
    f = [[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
    optimal_solution, _ = knapsack(w, wt, val, n)
    print(optimal_solution)
    print(mf_knapsack(n, wt, val, w))  # switched the n and w

    # testing the dynamic programming problem with example
    # the optimal subset for the above example are items 3 and 4
    optimal_solution, optimal_subset = knapsack_with_example_solution(w, wt, val)
    assert optimal_solution == 8
    assert optimal_subset == {3, 4}
    print("optimal_value = ", optimal_solution)
    print("An optimal subset corresponding to the optimal value", optimal_subset)
| 114 | '''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mean-pool the token embeddings, masking out padding positions.
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
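
# Illustrative usage (not part of the original module); the checkpoint name is an
# example and assumes weights compatible with this class are available:
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
# model = MultilingualCLIP.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
# batch = tokenizer(["en kattunge"], return_tensors="pt", padding=True)
# projected, token_embs = model(batch["input_ids"], batch["attention_mask"])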
| 67 | 0 |
def hex_to_bin(hex_num: str) -> int:
    """Convert a (possibly signed) hexadecimal string to its binary representation."""
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")
    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]
    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")
    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1
    return int(("-" + bin_str) if is_negative else bin_str)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
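    # Illustrative usage (not part of the original sample):
    print(hex_to_bin("1f"))   # 11111
    print(hex_to_bin("-ff"))  # -11111111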
| 343 | '''simple docstring'''
from . import (
    albert,
    align,
    altclip,
    audio_spectrogram_transformer,
    auto,
    autoformer,
    bark,
    bart,
    barthez,
    bartpho,
    beit,
    bert,
    bert_generation,
    bert_japanese,
    bertweet,
    big_bird,
    bigbird_pegasus,
    biogpt,
    bit,
    blenderbot,
    blenderbot_small,
    blip,
    blip_2,
    bloom,
    bridgetower,
    byt5,
    camembert,
    canine,
    chinese_clip,
    clap,
    clip,
    clipseg,
    codegen,
    conditional_detr,
    convbert,
    convnext,
    convnextv2,
    cpm,
    cpmant,
    ctrl,
    cvt,
    data2vec,
    deberta,
    deberta_v2,
    decision_transformer,
    deformable_detr,
    deit,
    deprecated,
    deta,
    detr,
    dialogpt,
    dinat,
    distilbert,
    dit,
    donut,
    dpr,
    dpt,
    efficientformer,
    efficientnet,
    electra,
    encodec,
    encoder_decoder,
    ernie,
    ernie_m,
    esm,
    falcon,
    flaubert,
    flava,
    fnet,
    focalnet,
    fsmt,
    funnel,
    git,
    glpn,
    gpt2,
    gpt_bigcode,
    gpt_neo,
    gpt_neox,
    gpt_neox_japanese,
    gpt_sw3,
    gptj,
    gptsan_japanese,
    graphormer,
    groupvit,
    herbert,
    hubert,
    ibert,
    imagegpt,
    informer,
    instructblip,
    jukebox,
    layoutlm,
    layoutlmv2,
    layoutlmv3,
    layoutxlm,
    led,
    levit,
    lilt,
    llama,
    longformer,
    longt5,
    luke,
    lxmert,
    m2m_100,
    marian,
    markuplm,
    mask2former,
    maskformer,
    mbart,
    mbart50,
    mega,
    megatron_bert,
    megatron_gpt2,
    mgp_str,
    mluke,
    mobilebert,
    mobilenet_v1,
    mobilenet_v2,
    mobilevit,
    mobilevitv2,
    mpnet,
    mra,
    mt5,
    musicgen,
    mvp,
    nat,
    nezha,
    nllb,
    nllb_moe,
    nystromformer,
    oneformer,
    open_llama,
    openai,
    opt,
    owlvit,
    pegasus,
    pegasus_x,
    perceiver,
    phobert,
    pix2struct,
    plbart,
    poolformer,
    prophetnet,
    qdqbert,
    rag,
    realm,
    reformer,
    regnet,
    rembert,
    resnet,
    roberta,
    roberta_prelayernorm,
    roc_bert,
    roformer,
    rwkv,
    sam,
    segformer,
    sew,
    sew_d,
    speech_encoder_decoder,
    speech_to_text,
    speech_to_text_2,
    speecht5,
    splinter,
    squeezebert,
    swiftformer,
    swin,
    swin2sr,
    swinv2,
    switch_transformers,
    t5,
    table_transformer,
    tapas,
    time_series_transformer,
    timesformer,
    timm_backbone,
    transfo_xl,
    trocr,
    tvlt,
    umt5,
    unispeech,
    unispeech_sat,
    upernet,
    videomae,
    vilt,
    vision_encoder_decoder,
    vision_text_dual_encoder,
    visual_bert,
    vit,
    vit_hybrid,
    vit_mae,
    vit_msn,
    vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
    wavlm,
    whisper,
    x_clip,
    xglm,
    xlm,
    xlm_prophetnet,
    xlm_roberta,
    xlm_roberta_xl,
    xlnet,
    xmod,
    yolos,
    yoso,
)
| 67 | 0 |
import argparse
import json
import os

import torch

from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken


@torch.no_grad()
def convert_luke_checkpoint(checkpoint_path, metadata_path, entity_vocab_path, pytorch_dump_folder_path, model_size):
    # Load configuration defined in the metadata file
    with open(metadata_path) as metadata_file:
        metadata = json.load(metadata_file)
    config = LukeConfig(use_entity_aware_attention=True, **metadata["model_config"])

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path)

    tokenizer = RobertaTokenizer.from_pretrained(metadata["model_config"]["bert_model_name"])

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_1 = AddedToken("<ent>", lstrip=False, rstrip=False)
    entity_token_2 = AddedToken("<ent2>", lstrip=False, rstrip=False)
    tokenizer.add_special_tokens({"additional_special_tokens": [entity_token_1, entity_token_2]})
    config.vocab_size += 2

    print(f"Saving tokenizer to {pytorch_dump_folder_path}")
    tokenizer.save_pretrained(pytorch_dump_folder_path)

    with open(os.path.join(pytorch_dump_folder_path, LukeTokenizer.vocab_files_names["entity_vocab_file"]), "w") as f:
        json.dump(entity_vocab, f)

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path)

    # Initialize the embeddings of the special tokens
    word_emb = state_dict["embeddings.word_embeddings.weight"]
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(["@"])[0]].unsqueeze(0)
    ent2_emb = word_emb[tokenizer.convert_tokens_to_ids(["#"])[0]].unsqueeze(0)
    state_dict["embeddings.word_embeddings.weight"] = torch.cat([word_emb, ent_emb, ent2_emb])

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f"encoder.layer.{layer_index}.attention.self."
            state_dict[prefix + "w2e_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2w_" + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + "e2e_" + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict["entity_embeddings.entity_embeddings.weight"]
    entity_emb[entity_vocab["[MASK2]"]] = entity_emb[entity_vocab["[MASK]"]]

    model = LukeModel(config=config).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    if not (len(missing_keys) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f"Missing keys {', '.join(missing_keys)}. Expected only missing embeddings.position_ids")
    if not (all(key.startswith("entity_predictions") or key.startswith("lm_head") for key in unexpected_keys)):
        raise ValueError(
            "Unexpected keys"
            f" {', '.join([key for key in unexpected_keys if not (key.startswith('entity_predictions') or key.startswith('lm_head'))])}"
        )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path, task="entity_classification")

    text = (
        "Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the"
        " new world number one avoid a humiliating second- round exit at Wimbledon ."
    )
    span = (39, 42)
    encoding = tokenizer(text, entity_spans=[span], add_prefix_space=True, return_tensors="pt")

    outputs = model(**encoding)

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 1024))
        expected_slice = torch.tensor(
            [[0.0133, 0.0865, 0.0095], [0.3093, -0.2576, -0.7418], [-0.1720, -0.2117, -0.2869]]
        )
    else:  # base
        expected_shape = torch.Size((1, 42, 768))
        expected_slice = torch.tensor([[0.0037, 0.1368, -0.0091], [0.1099, 0.3329, -0.1095], [0.0765, 0.5335, 0.1179]])

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}"
        )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 1024))
        expected_slice = torch.tensor([[0.0466, -0.0106, -0.0179]])
    else:  # base
        expected_shape = torch.Size((1, 1, 768))
        expected_slice = torch.tensor([[0.1457, 0.1044, 0.0174]])

    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f"Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is"
            f" {expected_shape}"
        )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3], expected_slice, atol=1e-4):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print("Saving PyTorch model to {}".format(pytorch_dump_folder_path))
    model.save_pretrained(pytorch_dump_folder_path)


def load_entity_vocab(entity_vocab_path):
    entity_vocab = {}
    with open(entity_vocab_path, "r", encoding="utf-8") as f:
        for index, line in enumerate(f):
            title, _ = line.rstrip().split("\t")
            entity_vocab[title] = index

    return entity_vocab


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    args = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
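
# Illustrative invocation (not part of the original script); the script and file
# names are examples only:
#   python convert_luke_checkpoint.py \
#       --checkpoint_path pytorch_model.bin --metadata_path metadata.json \
#       --entity_vocab_path entity_vocab.tsv \
#       --pytorch_dump_folder_path ./luke-base --model_size base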
| 218 | '''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def __lowerCAmelCase ( matrix ) -> list[list[float]]:
    d = Decimal
    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0] , swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0] , swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]
        # Calculate the inverse of the matrix
        return [
            [(float(d(n ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix ) == 3
        and len(matrix[0] ) == 3
        and len(matrix[1] ) == 3
        and len(matrix[2] ) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
            [d(0.0 ), d(0.0 ), d(0.0 )],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
            d(matrix[1][2] ) * d(matrix[2][1] )
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
            d(matrix[1][1] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
            d(matrix[0][2] ) * d(matrix[2][0] )
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
            d(matrix[0][2] ) * d(matrix[1][1] )
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
            d(matrix[0][1] ) * d(matrix[1][0] )
        )
        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix )
        for i in range(3 ):
            for j in range(3 ):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]
        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix )
        for i in range(3 ):
            for j in range(3 ):
                inverse_matrix[i][j] /= d(determinant )
        # Calculate the inverse of the matrix
        return [[float(d(n ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
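# Quick sanity check of the 2x2 branch above: det([[2, 5], [1, 3]]) = 2*3 - 1*5 = 1,
# so the inverse should be [[3, -5], [-1, 2]].
if __name__ == "__main__":
    print(__lowerCAmelCase([[2.0, 5.0], [1.0, 3.0]]))  # [[3.0, -5.0], [-1.0, 2.0]]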
| 67 | 0 |
'''simple docstring'''
def add ( first : int , second : int ) -> int:
    """Add two non-negative integers using only bitwise operations."""
    while second != 0:
        carry = first & second   # positions where both bits are set produce a carry
        first ^= second          # bitwise sum without the carry
        second = carry << 1      # shift the carry into the next bit position
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
print(f'''{add(first, second) = }''')
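# Worked trace of the carry loop above for add(5, 3):
#   first=0b101, second=0b011 -> carry=0b001, first=0b110, second=0b010
#   first=0b110, second=0b010 -> carry=0b010, first=0b100, second=0b100
#   first=0b100, second=0b100 -> carry=0b100, first=0b000, second=0b1000
#   second=0b1000 no longer overlaps first, so the final XOR yields 0b1000 = 8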
| 28 | '''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
def __lowerCAmelCase ( UpperCamelCase__=None , UpperCamelCase__=None ) -> int:
return field(default_factory=lambda: default , metadata=UpperCamelCase__ )
@dataclass
class a__ :
lowerCamelCase : List[str] =list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
lowerCamelCase : List[int] =list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
lowerCamelCase : List[int] =list_field(
default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
lowerCamelCase : str =field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
lowerCamelCase : str =field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
lowerCamelCase : str =field(
default=F'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
lowerCamelCase : str =field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
lowerCamelCase : str =field(
default=F'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
lowerCamelCase : str =field(
default=F'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
lowerCamelCase : int =field(default=3 , metadata={"help": "Times an experiment will be run."} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
warnings.warn(
f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , a , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
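# Hypothetical instantiation of the argument dataclass above (field and class
# names are mangled in this dump; in the real library this mirrors transformers'
# BenchmarkArguments):
#   args = BenchmarkArguments(models=["bert-base-cased"], batch_sizes=[8], sequence_lengths=[128])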
| 67 | 0 |
def __lowercase ( number_of_steps : int ) -> int:
    assert (
        isinstance(number_of_steps , int ) and number_of_steps > 0
    ), f"number_of_steps needs to be positive integer, your input {number_of_steps}"
    if number_of_steps == 1:
        return 1
    previous , current = 1, 1
    for _ in range(number_of_steps - 1 ):
        current , previous = current + previous, current
    return current
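# Worked example (hypothetical): for 3 steps the distinct climbs are
# (1,1,1), (1,2) and (2,1), so __lowercase(3) == 3.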
if __name__ == "__main__":
import doctest
doctest.testmod()
| 175 | '''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCAmelCase =None
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase ={
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase ={
"moussaKam/mbarthez": 1_0_2_4,
"moussaKam/barthez": 1_0_2_4,
"moussaKam/barthez-orangesum-title": 1_0_2_4,
}
__UpperCAmelCase ="▁"
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[int] =VOCAB_FILES_NAMES
lowerCamelCase : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] =["input_ids", "attention_mask"]
lowerCamelCase : Union[str, Any] =BarthezTokenizer
def __init__( self : Optional[Any] , a : Dict=None , a : str=None , a : List[Any]="<s>" , a : Optional[int]="</s>" , a : List[str]="</s>" , a : Tuple="<s>" , a : str="<unk>" , a : Any="<pad>" , a : Union[str, Any]="<mask>" , **a : Union[str, Any] , ):
"""simple docstring"""
__lowerCamelCase = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , )
__lowerCamelCase = vocab_file
__lowerCamelCase = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
__lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : List[int] , a : Optional[List[int]] = None ):
"""simple docstring"""
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : str , a : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase = os.path.join(
a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,)
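# Sketch of the special-token layout produced by the two methods above
# (the usual RoBERTa/BARThez scheme, shown here as an assumption):
#   single sequence:    <s> A </s>
#   pair of sequences:  <s> A </s></s> B </s>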
| 67 | 0 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCAmelCase_ ( __a ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: Optional[Any] =[
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(UpperCamelCase__ , UpperCamelCase__ )
def lowerCAmelCase_ ( __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__ , lowerCamelCase__: Dict =emb.weight.shape
lowerCamelCase__: Optional[int] =nn.Linear(UpperCamelCase__ , UpperCamelCase__ , bias=UpperCamelCase__ )
lowerCamelCase__: int =emb.weight.data
return lin_layer
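# The helper above implements weight tying: the embedding matrix of shape
# (vocab_size, d_model) is reused as a bias-free output projection mapping
# d_model -> vocab_size logits. Shapes are assumptions based on the usual
# fairseq/M2M-100 layout.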
def lowerCAmelCase_ ( __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: Optional[int] =torch.load(UpperCamelCase__ , map_location="cpu" )
lowerCamelCase__: Optional[Any] =mam_aaa["args"] or mam_aaa["cfg"]["model"]
lowerCamelCase__: int =mam_aaa["model"]
remove_ignore_keys_(UpperCamelCase__ )
lowerCamelCase__: Tuple =state_dict["encoder.embed_tokens.weight"].shape[0]
lowerCamelCase__: List[Any] =MaMaaaConfig(
vocab_size=UpperCamelCase__ , max_position_embeddings=1024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , )
lowerCamelCase__: Tuple =state_dict["decoder.embed_tokens.weight"]
lowerCamelCase__: List[str] =MaMaaaForConditionalGeneration(UpperCamelCase__ )
model.model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
lowerCamelCase__: Any =make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__A = argparse.ArgumentParser()
# Required parameters
parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
__A = parser.parse_args()
    __A = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 10 | '''simple docstring'''
def solution ( limit = 1_00_00_00 ) -> int:
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'{solution() = }')
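# Sanity check of the sieve-based totient sum above on a small limit:
# phi(2..8) = 1+2+2+4+2+6+4 = 21.
assert solution(8) == 21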
| 67 | 0 |
def xnor_gate ( input_a : int , input_b : int ) -> int:
    """XNOR: returns 1 exactly when both inputs are equal."""
    return 1 if input_a == input_b else 0
def test_xnor_gate ( ) -> None:
    """simple docstring"""
    assert xnor_gate(0 , 0 ) == 1
    assert xnor_gate(0 , 1 ) == 0
    assert xnor_gate(1 , 0 ) == 0
    assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 322 | '''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class a__ :
def __init__( self : Union[str, Any] , a : Union[str, Any] , a : Tuple=13 , a : Optional[Any]=7 , a : List[Any]=True , a : Optional[Any]=True , a : Any=True , a : Union[str, Any]=99 , a : Any=32 , a : int=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Optional[Any]="gelu" , a : Union[str, Any]=0.1 , a : Any=0.1 , a : Optional[int]=5_12 , a : int=16 , a : Optional[Any]=2 , a : Union[str, Any]=0.02 , a : Any=3 , a : Dict=4 , a : Any=None , ):
"""simple docstring"""
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = self.vocab_size - 1
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : Dict , a : List[str] , a : Tuple , a : List[Any] , *a : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTModel(config=a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , head_mask=a )
__lowerCamelCase = model(a , token_type_ids=a )
__lowerCamelCase = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Union[str, Any] , a : Dict , a : Union[str, Any] , a : Tuple , *a : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTLMHeadModel(a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Tuple , a : Optional[int] , a : Union[str, Any] , a : Optional[Any] , *a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTDoubleHeadsModel(a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : int , a : Dict , a : Optional[Any] , a : str , *a : int ):
"""simple docstring"""
__lowerCamelCase = self.num_labels
__lowerCamelCase = OpenAIGPTForSequenceClassification(a )
model.to(a )
model.eval()
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) = config_and_inputs
__lowerCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class a__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : List[str] =(
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCamelCase : str =(
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCamelCase : Optional[int] =(
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : Tuple , a : Optional[int] , a : int , a : str , a : Any ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : int , a : Optional[int] , a : str=False ):
"""simple docstring"""
__lowerCamelCase = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a , )
__lowerCamelCase = inputs_dict['''labels''']
__lowerCamelCase = inputs_dict['''labels''']
__lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a , )
__lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=a , n_embd=37 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = OpenAIGPTModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_torch
class a__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(a )
__lowerCamelCase = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=a ) # the president is
__lowerCamelCase = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__lowerCamelCase = model.generate(a , do_sample=a )
self.assertListEqual(output_ids[0].tolist() , a )
| 67 | 0 |
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class lowerCamelCase_ ( UpperCAmelCase__ ):
def lowercase_ ( self : Any ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = tempfile.mkdtemp()
UpperCAmelCase__ : Optional[Any] = 8
# DPR tok
UpperCAmelCase__ : Dict = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
UpperCAmelCase__ : List[str] = os.path.join(self.tmpdirname , '''dpr_tokenizer''' )
os.makedirs(_A , exist_ok=_A )
UpperCAmelCase__ : Tuple = os.path.join(_A , DPR_VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
# BART tok
UpperCAmelCase__ : Tuple = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
UpperCAmelCase__ : str = dict(zip(_A , range(len(_A ) ) ) )
UpperCAmelCase__ : Union[str, Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
UpperCAmelCase__ : Union[str, Any] = {'''unk_token''': '''<unk>'''}
UpperCAmelCase__ : List[str] = os.path.join(self.tmpdirname , '''bart_tokenizer''' )
os.makedirs(_A , exist_ok=_A )
UpperCAmelCase__ : Union[str, Any] = os.path.join(_A , BART_VOCAB_FILES_NAMES['''vocab_file'''] )
UpperCAmelCase__ : Dict = os.path.join(_A , BART_VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_A ) )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''dpr_tokenizer''' ) )
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , '''bart_tokenizer''' ) )
def lowercase_ ( self : Any ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.get_dummy_dataset()
UpperCAmelCase__ : List[str] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
UpperCAmelCase__ : str = dataset
UpperCAmelCase__ : Optional[Any] = RagRetriever(
_A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def lowercase_ ( self : str , _A : bool ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.get_dummy_dataset()
UpperCAmelCase__ : str = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''custom''' , )
if from_disk:
UpperCAmelCase__ : Optional[int] = os.path.join(self.tmpdirname , '''dataset''' )
UpperCAmelCase__ : Optional[Any] = os.path.join(self.tmpdirname , '''index.faiss''' )
dataset.get_index('''embeddings''' ).save(os.path.join(self.tmpdirname , '''index.faiss''' ) )
dataset.drop_index('''embeddings''' )
dataset.save_to_disk(os.path.join(self.tmpdirname , '''dataset''' ) )
del dataset
UpperCAmelCase__ : Union[str, Any] = RagRetriever(
_A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
UpperCAmelCase__ : str = RagRetriever(
_A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , _A ) , )
return retriever
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Any = Dataset.from_dict(
{
'''id''': ['''0''', '''1'''],
'''text''': ['''foo''', '''bar'''],
'''title''': ['''Foo''', '''Bar'''],
'''embeddings''': [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
} )
dataset.add_faiss_index('''embeddings''' , string_factory='''Flat''' , metric_type=faiss.METRIC_INNER_PRODUCT )
UpperCAmelCase__ : int = os.path.join(self.tmpdirname , '''hf_bert_base.hnswSQ8_correct_phi_128.c_index''' )
dataset.save_faiss_index('''embeddings''' , index_file_name + '''.index.dpr''' )
pickle.dump(dataset['''id'''] , open(index_file_name + '''.index_meta.dpr''' , '''wb''' ) )
UpperCAmelCase__ : List[Any] = os.path.join(self.tmpdirname , '''psgs_w100.tsv.pkl''' )
UpperCAmelCase__ : Optional[Any] = {sample['''id''']: [sample['''text'''], sample['''title''']] for sample in dataset}
pickle.dump(_A , open(_A , '''wb''' ) )
UpperCAmelCase__ : List[str] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name='''legacy''' , index_path=self.tmpdirname , )
UpperCAmelCase__ : Dict = RagRetriever(
_A , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
return retriever
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = 1
UpperCAmelCase__ : List[str] = self.get_dummy_canonical_hf_index_retriever()
UpperCAmelCase__ : List[str] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = retriever.retrieve(_A , n_docs=_A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _A )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch('''transformers.models.rag.retrieval_rag.load_dataset''' ) as mock_load_dataset:
UpperCAmelCase__ : List[str] = self.get_dummy_dataset()
retriever.save_pretrained(_A )
UpperCAmelCase__ : Optional[int] = RagRetriever.from_pretrained(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ : Tuple = retriever.retrieve(_A , n_docs=1 )
self.assertTrue(out is not None )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = 1
UpperCAmelCase__ : str = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
UpperCAmelCase__ : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Dict = retriever.retrieve(_A , n_docs=_A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _A )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_A )
UpperCAmelCase__ : List[str] = RagRetriever.from_pretrained(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ : Dict = retriever.retrieve(_A , n_docs=1 )
self.assertTrue(out is not None )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : Tuple = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
UpperCAmelCase__ : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = retriever.retrieve(_A , n_docs=_A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''embeddings''', '''id''', '''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''id'''] ) , _A )
self.assertEqual(doc_dicts[0]['''id'''][0] , '''1''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''id'''][0] , '''0''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowercase_ ( self : str ):
'''simple docstring'''
UpperCAmelCase__ : int = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_A )
UpperCAmelCase__ : Union[str, Any] = RagRetriever.from_pretrained(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ : Tuple = retriever.retrieve(_A , n_docs=1 )
self.assertTrue(out is not None )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = 1
UpperCAmelCase__ : Optional[Any] = self.get_dummy_legacy_index_retriever()
UpperCAmelCase__ : str = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : List[str] = retriever.retrieve(_A , n_docs=_A )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(_A ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ['''text''', '''title'''] )
self.assertEqual(len(doc_dicts[0]['''text'''] ) , _A )
self.assertEqual(doc_dicts[0]['''text'''][0] , '''bar''' ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]['''text'''][0] , '''foo''' ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(_A )
UpperCAmelCase__ : Dict = RagRetriever.from_pretrained(_A )
self.assertIsInstance(_A , _A )
UpperCAmelCase__ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ : Any = retriever.retrieve(_A , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowercase_ ( self : str ):
'''simple docstring'''
import torch
UpperCAmelCase__ : Any = 1
UpperCAmelCase__ : Tuple = self.get_dummy_canonical_hf_index_retriever()
UpperCAmelCase__ : List[Any] = [[5, 7], [10, 11]]
UpperCAmelCase__ : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ : Optional[Any] = retriever(_A , _A , prefix=retriever.config.generator.prefix , n_docs=_A )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : int = (
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_A , _A )
self.assertIsInstance(_A , _A )
self.assertIsInstance(_A , np.ndarray )
UpperCAmelCase__ : List[Any] = retriever(
_A , _A , prefix=retriever.config.generator.prefix , n_docs=_A , return_tensors='''pt''' , )
UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ : Optional[int] = ( # noqa: F841
out['''context_input_ids'''],
out['''context_attention_mask'''],
out['''retrieved_doc_embeds'''],
out['''doc_ids'''],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(_A , torch.Tensor )
self.assertIsInstance(_A , torch.Tensor )
self.assertIsInstance(_A , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Tuple = self.get_dpr_ctx_encoder_tokenizer()
UpperCAmelCase__ : Dict = 1
UpperCAmelCase__ : Union[str, Any] = self.get_dummy_custom_hf_index_retriever(from_disk=_A )
retriever.set_ctx_encoder_tokenizer(_A )
UpperCAmelCase__ : int = [[5, 7], [10, 11]]
UpperCAmelCase__ : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.floataa )
UpperCAmelCase__ : Union[str, Any] = retriever(_A , _A , prefix=retriever.config.generator.prefix , n_docs=_A )
self.assertEqual(
            len(_A ) , 6 ) # check whether the retriever output consists of 6 attributes including tokenized docs
self.assertEqual(
all(k in out for k in ('''tokenized_doc_ids''', '''tokenized_doc_attention_mask''') ) , _A ) # check for doc token related keys in dictionary.
| 181 | '''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[int] =["image_processor", "tokenizer"]
lowerCamelCase : Union[str, Any] ="LayoutLMv2ImageProcessor"
lowerCamelCase : int =("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Optional[int] , a : Any=None , a : Any=None , **a : Union[str, Any] ):
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a , )
__lowerCamelCase = kwargs.pop('''feature_extractor''' )
__lowerCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a , a )
def __call__( self : Tuple , a : Optional[int] , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , a : Union[List[List[int]], List[List[List[int]]]] = None , a : Optional[Union[List[int], List[List[int]]]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : Tuple , ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
__lowerCamelCase = self.image_processor(images=a , return_tensors=a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(a , a ):
__lowerCamelCase = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowerCamelCase = features['''words''']
__lowerCamelCase = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , )
# add pixel values
__lowerCamelCase = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__lowerCamelCase = self.get_overflowing_images(a , encoded_inputs['''overflow_to_sample_mapping'''] )
__lowerCamelCase = images
return encoded_inputs
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : Optional[Any] , a : str ):
"""simple docstring"""
__lowerCamelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(a ) != len(a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f""" {len(a )} and {len(a )}""" )
return images_with_overflow
def SCREAMING_SNAKE_CASE__ ( self : List[str] , *a : Optional[Any] , **a : Union[str, Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*a , **a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , *a : Union[str, Any] , **a : Tuple ):
"""simple docstring"""
return self.tokenizer.decode(*a , **a )
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a , )
return self.image_processor
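# Hypothetical end-to-end call of the processor above (LayoutXLMProcessor in the
# real library): the image processor runs OCR to extract words and boxes, which
# the tokenizer then encodes together with the image.
#   encoding = processor(image, return_tensors="pt")
#   sorted(encoding.keys())  # attention_mask, bbox, image, input_ids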
| 67 | 0 |
'''simple docstring'''
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def a_ ( __snake_case : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase_ =botoa.client('''iam''' )
lowerCamelCase_ ={
'''Version''': '''2012-10-17''',
'''Statement''': [
{'''Effect''': '''Allow''', '''Principal''': {'''Service''': '''sagemaker.amazonaws.com'''}, '''Action''': '''sts:AssumeRole'''}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=UpperCamelCase__ , AssumeRolePolicyDocument=json.dumps(UpperCamelCase__ , indent=2 ) )
lowerCamelCase_ ={
'''Version''': '''2012-10-17''',
'''Statement''': [
{
'''Effect''': '''Allow''',
'''Action''': [
'''sagemaker:*''',
'''ecr:GetDownloadUrlForLayer''',
'''ecr:BatchGetImage''',
'''ecr:BatchCheckLayerAvailability''',
'''ecr:GetAuthorizationToken''',
'''cloudwatch:PutMetricData''',
'''cloudwatch:GetMetricData''',
'''cloudwatch:GetMetricStatistics''',
'''cloudwatch:ListMetrics''',
'''logs:CreateLogGroup''',
'''logs:CreateLogStream''',
'''logs:DescribeLogStreams''',
'''logs:PutLogEvents''',
'''logs:GetLogEvents''',
'''s3:CreateBucket''',
'''s3:ListBucket''',
'''s3:GetBucketLocation''',
'''s3:GetObject''',
'''s3:PutObject''',
],
'''Resource''': '''*''',
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=UpperCamelCase__ , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(UpperCamelCase__ , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def a_ ( __snake_case : List[str] ) -> Any:
"""simple docstring"""
lowerCamelCase_ =botoa.client('''iam''' )
return iam_client.get_role(RoleName=UpperCamelCase__ )["Role"]["Arn"]
def a_ ( ) -> str:
"""simple docstring"""
lowerCamelCase_ =_ask_options(
'''How do you want to authorize?''' , ['''AWS Profile''', '''Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) '''] , UpperCamelCase__ , )
lowerCamelCase_ =None
if credentials_configuration == 0:
lowerCamelCase_ =_ask_field('''Enter your AWS Profile name: [default] ''' , default='''default''' )
lowerCamelCase_ =aws_profile
else:
print(
            '''Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch your training script with '''
'''`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`''' )
lowerCamelCase_ =_ask_field('''AWS Access Key ID: ''' )
lowerCamelCase_ =aws_access_key_id
lowerCamelCase_ =_ask_field('''AWS Secret Access Key: ''' )
lowerCamelCase_ =aws_secret_access_key
lowerCamelCase_ =_ask_field('''Enter your AWS Region: [us-east-1]''' , default='''us-east-1''' )
lowerCamelCase_ =aws_region
lowerCamelCase_ =_ask_options(
'''Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?''' , ['''Provide IAM Role name''', '''Create new IAM role using credentials'''] , UpperCamelCase__ , )
if role_management == 0:
lowerCamelCase_ =_ask_field('''Enter your IAM role name: ''' )
else:
lowerCamelCase_ ='''accelerate_sagemaker_execution_role'''
print(F'''Accelerate will create an iam role \"{iam_role_name}\" using the provided credentials''' )
_create_iam_role_for_sagemaker(UpperCamelCase__ )
lowerCamelCase_ =_ask_field(
'''Do you want to use custom Docker image? [yes/NO]: ''' , _convert_yes_no_to_bool , default=UpperCamelCase__ , error_message='''Please enter yes or no.''' , )
lowerCamelCase_ =None
if is_custom_docker_image:
lowerCamelCase_ =_ask_field('''Enter your Docker image: ''' , lambda __snake_case : str(UpperCamelCase__ ).lower() )
lowerCamelCase_ =_ask_field(
'''Do you want to provide SageMaker input channels with data locations? [yes/NO]: ''' , _convert_yes_no_to_bool , default=UpperCamelCase__ , error_message='''Please enter yes or no.''' , )
lowerCamelCase_ =None
if is_sagemaker_inputs_enabled:
lowerCamelCase_ =_ask_field(
'''Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ''' , lambda __snake_case : str(UpperCamelCase__ ).lower() , )
lowerCamelCase_ =_ask_field(
'''Do you want to enable SageMaker metrics? [yes/NO]: ''' , _convert_yes_no_to_bool , default=UpperCamelCase__ , error_message='''Please enter yes or no.''' , )
lowerCamelCase_ =None
if is_sagemaker_metrics_enabled:
lowerCamelCase_ =_ask_field(
'''Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ''' , lambda __snake_case : str(UpperCamelCase__ ).lower() , )
lowerCamelCase_ =_ask_options(
'''What is the distributed mode?''' , ['''No distributed training''', '''Data parallelism'''] , _convert_sagemaker_distributed_mode , )
lowerCamelCase_ ={}
lowerCamelCase_ =_ask_field(
'''Do you wish to optimize your script with torch dynamo?[yes/NO]:''' , _convert_yes_no_to_bool , default=UpperCamelCase__ , error_message='''Please enter yes or no.''' , )
if use_dynamo:
lowerCamelCase_ ='''dynamo_'''
lowerCamelCase_ =_ask_options(
'''Which dynamo backend would you like to use?''' , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
lowerCamelCase_ =_ask_field(
'''Do you want to customize the defaults sent to torch.compile? [yes/NO]: ''' , _convert_yes_no_to_bool , default=UpperCamelCase__ , error_message='''Please enter yes or no.''' , )
if use_custom_options:
lowerCamelCase_ =_ask_options(
'''Which mode do you want to use?''' , UpperCamelCase__ , lambda __snake_case : TORCH_DYNAMO_MODES[int(UpperCamelCase__ )] , default='''default''' , )
lowerCamelCase_ =_ask_field(
                '''Do you want the fullgraph mode or is it ok to break the model into several subgraphs? [yes/NO]: ''' , _convert_yes_no_to_bool , default=UpperCamelCase__ , error_message='''Please enter yes or no.''' , )
lowerCamelCase_ =_ask_field(
'''Do you want to enable dynamic shape tracing? [yes/NO]: ''' , _convert_yes_no_to_bool , default=UpperCamelCase__ , error_message='''Please enter yes or no.''' , )
lowerCamelCase_ ='''Which EC2 instance type you want to use for your training?'''
if distributed_type != SageMakerDistributedType.NO:
lowerCamelCase_ =_ask_options(
UpperCamelCase__ , UpperCamelCase__ , lambda __snake_case : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(UpperCamelCase__ )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
lowerCamelCase_ =_ask_field(UpperCamelCase__ , lambda __snake_case : str(UpperCamelCase__ ).lower() , default='''ml.p3.2xlarge''' )
lowerCamelCase_ =1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
lowerCamelCase_ =_ask_field(
            '''How many machines do you want to use? [1]: ''' , UpperCamelCase__ , default=1 , )
lowerCamelCase_ =_ask_options(
'''Do you wish to use FP16 or BF16 (mixed precision)?''' , ['''no''', '''fp16''', '''bf16''', '''fp8'''] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
'''Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.''' )
return SageMakerConfig(
image_uri=UpperCamelCase__ , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=UpperCamelCase__ , use_cpu=UpperCamelCase__ , dynamo_config=UpperCamelCase__ , eca_instance_type=UpperCamelCase__ , profile=UpperCamelCase__ , region=UpperCamelCase__ , iam_role_name=UpperCamelCase__ , mixed_precision=UpperCamelCase__ , num_machines=UpperCamelCase__ , sagemaker_inputs_file=UpperCamelCase__ , sagemaker_metrics_file=UpperCamelCase__ , )
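# Hypothetical shape of the object returned by the questionnaire above:
#   SageMakerConfig(compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
#                   distributed_type=SageMakerDistributedType.DATA_PARALLEL,
#                   eca_instance_type="ml.p3.2xlarge", num_machines=2,
#                   mixed_precision="fp16", ...)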
| 75 | '''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
class a__ ( UpperCAmelCase__ ):
def __init__( self : Optional[Any] , a : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = nn.ModuleList(a )
def SCREAMING_SNAKE_CASE__ ( self : Any , a : torch.FloatTensor , a : Union[torch.Tensor, float, int] , a : torch.Tensor , a : List[torch.tensor] , a : List[float] , a : Optional[torch.Tensor] = None , a : Optional[torch.Tensor] = None , a : Optional[torch.Tensor] = None , a : Optional[Dict[str, Any]] = None , a : bool = False , a : bool = True , ):
"""simple docstring"""
for i, (image, scale, controlnet) in enumerate(zip(a , a , self.nets ) ):
__lowerCamelCase , __lowerCamelCase = controlnet(
a , a , a , a , a , a , a , a , a , a , a , )
# merge samples
if i == 0:
__lowerCamelCase , __lowerCamelCase = down_samples, mid_sample
else:
__lowerCamelCase = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(a , a )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def SCREAMING_SNAKE_CASE__ ( self : Any , a : Union[str, os.PathLike] , a : bool = True , a : Callable = None , a : bool = False , a : Optional[str] = None , ):
"""simple docstring"""
__lowerCamelCase = 0
__lowerCamelCase = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
a , is_main_process=a , save_function=a , safe_serialization=a , variant=a , )
idx += 1
__lowerCamelCase = model_path_to_save + f"""_{idx}"""
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , a : Optional[Union[str, os.PathLike]] , **a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = 0
__lowerCamelCase = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
__lowerCamelCase = pretrained_model_path
while os.path.isdir(a ):
__lowerCamelCase = ControlNetModel.from_pretrained(a , **a )
controlnets.append(a )
idx += 1
__lowerCamelCase = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(a )} controlnets loaded from {pretrained_model_path}.""" )
if len(a ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(a )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(a )
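# Assumed usage of the multi-ControlNet wrapper above (mirrors diffusers'
# MultiControlNetModel): each net's residuals are scaled by its conditioning
# scale and summed before being passed to the UNet.
#   multi = MultiControlNetModel([controlnet_pose, controlnet_canny])
#   down, mid = multi(sample, t, prompt_embeds, [pose_image, canny_image], [1.0, 0.5])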
| 67 | 0 |
import argparse
import torch
from transformers import FunnelBaseModel, FunnelConfig, FunnelModel, load_tf_weights_in_funnel
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, base_model):
    # Initialise PyTorch model
    config = FunnelConfig.from_json_file(config_file)
    print(f"""Building PyTorch model from configuration: {config}""")
    model = FunnelBaseModel(config) if base_model else FunnelModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_funnel(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"""Save PyTorch model to {pytorch_dump_path}""")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--base_model', action='store_true', help='Whether you want just the base model (no decoder) or not.'
)
__UpperCamelCase : Tuple = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.base_model
)
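# (Added) typical invocation of this conversion script; the script filename and all
# paths below are placeholders, not taken from the original file:
#
#   python convert_funnel_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./funnel_tf/model.ckpt \
#       --config_file ./funnel_tf/config.json \
#       --pytorch_dump_path ./funnel_pt/pytorch_model.bin
#
# Pass --base_model additionally to export the encoder-only FunnelBaseModel.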
| 182 | '''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        """simple docstring"""
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        """simple docstring"""
        return {}, {}, {}

    def preprocess(self, image):
        """simple docstring"""
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        """simple docstring"""
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        """simple docstring"""
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
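# (Added) minimal usage sketch of the pipeline above; "Intel/dpt-large" is one public
# depth-estimation checkpoint, and "cat.jpg" is a placeholder path:
#
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("cat.jpg")
#   result["depth"].save("cat_depth.png")  # PIL image; result["predicted_depth"] is a tensor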
| 67 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
    IFImg2ImgPipeline,
    IFImg2ImgSuperResolutionPipeline,
    IFInpaintingPipeline,
    IFInpaintingSuperResolutionPipeline,
    IFPipeline,
    IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_dummy_components()
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_all(self):
        # if
        pipe_1 = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
        pipe_2 = IFSuperResolutionPipeline.from_pretrained(
            "DeepFloyd/IF-II-L-v1.0", variant="fp16", torch_dtype=torch.float16, text_encoder=None, tokenizer=None
        )

        # pre compute text embeddings and remove T5 to save memory
        pipe_1.text_encoder.to("cuda")

        prompt_embeds, negative_prompt_embeds = pipe_1.encode_prompt("anime turtle", device="cuda")

        del pipe_1.tokenizer
        del pipe_1.text_encoder
        gc.collect()

        pipe_1.tokenizer = None
        pipe_1.text_encoder = None

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # img2img
        pipe_1 = IFImg2ImgPipeline(**pipe_1.components)
        pipe_2 = IFImg2ImgSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_img2img(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)

        pipe_1.remove_all_hooks()
        pipe_2.remove_all_hooks()

        # inpainting
        pipe_1 = IFInpaintingPipeline(**pipe_1.components)
        pipe_2 = IFInpaintingSuperResolutionPipeline(**pipe_2.components)

        pipe_1.enable_model_cpu_offload()
        pipe_2.enable_model_cpu_offload()

        pipe_1.unet.set_attn_processor(AttnAddedKVProcessor())
        pipe_2.unet.set_attn_processor(AttnAddedKVProcessor())

        self._test_if_inpainting(pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds)
    def _test_if(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 13 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_img2img(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
    def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
        # pipeline 1
        _start_torch_memory_measurement()

        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe_1(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            num_inference_steps=2,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (64, 64, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 10 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
        )
        assert_mean_pixel_difference(image, expected_image)

        # pipeline 2
        _start_torch_memory_measurement()

        generator = torch.Generator(device="cpu").manual_seed(0)
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
        original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
        mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
        output = pipe_2(
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            image=image,
            mask_image=mask_image,
            original_image=original_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        image = output.images[0]

        assert image.shape == (256, 256, 3)

        mem_bytes = torch.cuda.max_memory_allocated()
        assert mem_bytes < 4 * 10**9

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
        )
        assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 155 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__UpperCAmelCase =["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
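# (Added illustration, not part of transformers.) A stripped-down, self-contained sketch
# of the lazy-import pattern used above: `import package` only builds a cheap module
# proxy, and the heavy submodule import happens on first attribute access. All names
# here are invented for the example.
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {
            symbol: submodule for submodule, symbols in import_structure.items() for symbol in symbols
        }

    def __getattr__(self, symbol: str):
        # only reached when normal lookup fails, i.e. on first access to `symbol`
        submodule = importlib.import_module("." + self._symbol_to_module[symbol], self.__name__)
        return getattr(submodule, symbol)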
| 67 | 0 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    """simple docstring"""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        backbone_featmap_shape=[1, 16, 4, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape

        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 32) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [4, 8, 16, 32],
            "num_groups": 2,
        }

        return ViTHybridConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            backbone_featmap_shape=self.backbone_featmap_shape,
            backbone_config=backbone_config,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = ViTHybridModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTHybridModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""

    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": ViTHybridModel, "image-classification": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTHybridModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTHybridConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f"""{name}.{key}""" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"""Parameter {name} of model {model_class} seems not properly initialized""",
                    )

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @cached_property
    def default_image_processor(self):
        return (
            ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    def test_accelerate_inference(self):
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384")
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384", device_map="auto")

        image = prepare_img()

        inputs = image_processor(images=image, return_tensors="pt")
        outputs = model(**inputs)
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1).item()

        self.assertTrue(model.config.id2label[predicted_class_idx], "tabby, tabby cat")
| 114 | '''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = '''ylacombe/bark-small'''
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = '''en_speaker_1'''
__lowerCamelCase = '''This is a test string'''
__lowerCamelCase = '''speaker_embeddings_path.json'''
__lowerCamelCase = '''speaker_embeddings'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **a : Dict ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = BarkProcessor(tokenizer=a )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowerCamelCase = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__lowerCamelCase = 35
__lowerCamelCase = 2
__lowerCamelCase = 8
__lowerCamelCase = {
'''semantic_prompt''': np.ones(a ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__lowerCamelCase = processor(text=self.input_string , voice_preset=a )
__lowerCamelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__lowerCamelCase = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(a , **a )
__lowerCamelCase = processor(text=self.input_string , voice_preset=a )
__lowerCamelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__lowerCamelCase = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = BarkProcessor(tokenizer=a )
__lowerCamelCase = processor(text=self.input_string )
__lowerCamelCase = tokenizer(
self.input_string , padding='''max_length''' , max_length=2_56 , add_special_tokens=a , return_attention_mask=a , return_token_type_ids=a , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 67 | 0 |
def largest_square_area_in_matrix_top_down_approach(rows: int, cols: int, mat: list[list[int]]) -> int:
    '''simple docstring'''

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]
def largest_square_area_in_matrix_top_down_approach_with_dp(rows: int, cols: int, mat: list[list[int]]) -> int:
    '''simple docstring'''

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    '''simple docstring'''
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area
def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    '''simple docstring'''
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row
    return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
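    # (Added) the other variants above compute the same answer; for example:
    # print(largest_square_area_in_matrix_top_down_approach(2, 2, [[1, 1], [1, 1]]))  # -> 2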
| 343 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase ={"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
_lowerCAmelCase : Tuple = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 218 | '''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """This was added to get rougeLsum scores matching published rougeL scores for BART and PEGASUS."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
| 67 | 0 |
'''simple docstring'''
from __future__ import annotations
class BoyerMooreSearch:
    """simple docstring"""

    def __init__(self, text: str, pattern: str):
        """simple docstring"""
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """simple docstring"""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """simple docstring"""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self):
        """simple docstring"""
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
_lowerCamelCase : List[str] = "ABAABA"
_lowerCamelCase : str = "AB"
_lowerCamelCase : Union[str, Any] = BoyerMooreSearch(text, pattern)
_lowerCamelCase : Union[str, Any] = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
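    # (Added) for the inputs above, "AB" occurs in "ABAABA" at offsets 0 and 3,
    # so this branch prints [0, 3].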
| 28 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class GPTNeoXJapaneseConfig(PretrainedConfig):
    model_type = "gpt_neox_japanese"

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=2560,
        num_hidden_layers=32,
        num_attention_heads=32,
        intermediate_multiple_size=4,
        hidden_act="gelu",
        rotary_pct=1.00,
        rotary_emb_base=10000,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=31996,
        eos_token_id=31999,
        attention_dropout=0.1,
        hidden_dropout=0.0,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
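# (Added) quick sanity check of the defaults above, assuming transformers is installed:
#   config = GPTNeoXJapaneseConfig()
#   assert config.hidden_size == 2560 and config.model_type == "gpt_neox_japanese"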
| 67 | 0 |
import math
from collections.abc import Callable
def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    # secant method: x_{n+2} = x_{n+1} - f(x_{n+1}) * (x_{n+1} - x_n) / (f(x_{n+1}) - f(x_n))
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError('float division by zero, could not find root')
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5
if __name__ == "__main__":
print(intersection(f, 3, 3.5))
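    # (Added) x**3 - 2*x - 5 = 0 has its real root near 2.0945515, so the call above
    # should print a value very close to that.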
| 175 | '''simple docstring'''
def add(first: int, second: int) -> int:
    # bitwise ripple-carry addition: XOR sums without carry, (AND << 1) is the carry
    while second != 0:
        carry = first & second
        first ^= second
        second = carry << 1
    return first
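# (Added) doctest-style examples for add():
# >>> add(3, 5)
# 8
# >>> add(13, 17)
# 30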
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase =int(input("Enter the first number: ").strip())
__UpperCAmelCase =int(input("Enter the second number: ").strip())
print(f'{add(first, second) = }')
| 67 | 0 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
import datasets
import datasets.config
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( ) -> str:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =10
lowerCamelCase__: List[str] =datasets.Features(
{
"tokens": datasets.Sequence(datasets.Value("string" ) ),
"labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"] ) ),
"answers": datasets.Sequence(
{
"text": datasets.Value("string" ),
"answer_start": datasets.Value("int32" ),
} ),
"id": datasets.Value("int64" ),
} )
lowerCamelCase__: int =datasets.Dataset.from_dict(
{
"tokens": [["foo"] * 5] * n,
"labels": [[1] * 5] * n,
"answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
"id": list(range(UpperCamelCase__ ) ),
} , features=UpperCamelCase__ , )
return dataset
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: List[str] =str(tmp_path_factory.mktemp("data" ) / "file.arrow" )
dataset.map(cache_file_name=UpperCamelCase__ )
return filename
# FILE_CONTENT + files
__A = "\\n Text data.\n Second line of data."
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: Optional[Any] =tmp_path_factory.mktemp("data" ) / "file.txt"
lowerCamelCase__: List[str] =FILE_CONTENT
with open(UpperCamelCase__ , "w" ) as f:
f.write(UpperCamelCase__ )
return filename
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> Optional[Any]:
"""simple docstring"""
    import bz2
lowerCamelCase__: List[Any] =tmp_path_factory.mktemp("data" ) / "file.txt.bz2"
lowerCamelCase__: List[Any] =bytes(UpperCamelCase__ , "utf-8" )
    with bz2.open(UpperCamelCase__ , "wb" ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> Dict:
"""simple docstring"""
import gzip
lowerCamelCase__: Any =str(tmp_path_factory.mktemp("data" ) / "file.txt.gz" )
lowerCamelCase__: Union[str, Any] =bytes(UpperCamelCase__ , "utf-8" )
with gzip.open(UpperCamelCase__ , "wb" ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> str:
"""simple docstring"""
if datasets.config.LZ4_AVAILABLE:
        import lz4.frame
lowerCamelCase__: Optional[int] =tmp_path_factory.mktemp("data" ) / "file.txt.lz4"
lowerCamelCase__: str =bytes(UpperCamelCase__ , "utf-8" )
    with lz4.frame.open(UpperCamelCase__ , "wb" ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a ) -> int:
"""simple docstring"""
if datasets.config.PY7ZR_AVAILABLE:
        import py7zr
lowerCamelCase__: int =tmp_path_factory.mktemp("data" ) / "file.txt.7z"
    with py7zr.SevenZipFile(UpperCamelCase__ , "w" ) as archive:
archive.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a ) -> List[Any]:
"""simple docstring"""
import tarfile
lowerCamelCase__: List[Any] =tmp_path_factory.mktemp("data" ) / "file.txt.tar"
with tarfile.TarFile(UpperCamelCase__ , "w" ) as f:
f.add(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> str:
"""simple docstring"""
import lzma
lowerCamelCase__: Union[str, Any] =tmp_path_factory.mktemp("data" ) / "file.txt.xz"
lowerCamelCase__: List[Any] =bytes(UpperCamelCase__ , "utf-8" )
with lzma.open(UpperCamelCase__ , "wb" ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]:
"""simple docstring"""
import zipfile
lowerCamelCase__: Dict =tmp_path_factory.mktemp("data" ) / "file.txt.zip"
with zipfile.ZipFile(UpperCamelCase__ , "w" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> List[Any]:
"""simple docstring"""
if datasets.config.ZSTANDARD_AVAILABLE:
import zstandard as zstd
lowerCamelCase__: List[Any] =tmp_path_factory.mktemp("data" ) / "file.txt.zst"
lowerCamelCase__: List[str] =bytes(UpperCamelCase__ , "utf-8" )
with zstd.open(UpperCamelCase__ , "wb" ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: List[str] =tmp_path_factory.mktemp("data" ) / "file.xml"
lowerCamelCase__: str =textwrap.dedent(
"\\n <?xml version=\"1.0\" encoding=\"UTF-8\" ?>\n <tmx version=\"1.4\">\n <header segtype=\"sentence\" srclang=\"ca\" />\n <body>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 1</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 1</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 2</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 2</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 3</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 3</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 4</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 4</seg></tuv>\n </tu>\n <tu>\n <tuv xml:lang=\"ca\"><seg>Contingut 5</seg></tuv>\n <tuv xml:lang=\"en\"><seg>Content 5</seg></tuv>\n </tu>\n </body>\n </tmx>" )
with open(UpperCamelCase__ , "w" ) as f:
f.write(UpperCamelCase__ )
return filename
DATA = [
{"col_1": "0", "col_2": 0, "col_3": 0.0},
{"col_1": "1", "col_2": 1, "col_3": 1.0},
{"col_1": "2", "col_2": 2, "col_3": 2.0},
{"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
{"col_1": "4", "col_2": 4, "col_3": 4.0},
{"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
"col_1": ["0", "1", "2", "3"],
"col_2": [0, 1, 2, 3],
"col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
{"col_3": 0.0, "col_1": "0", "col_2": 0},
{"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
{"col_1": "s0", "col_2": 0, "col_3": 0.0},
{"col_1": "s1", "col_2": 1, "col_3": 1.0},
{"col_1": "s2", "col_2": 2, "col_3": 2.0},
{"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( ) -> Any:
"""simple docstring"""
return DATA_DICT_OF_LISTS
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: List[str] =datasets.Dataset.from_dict(UpperCamelCase__ )
lowerCamelCase__: List[Any] =str(tmp_path_factory.mktemp("data" ) / "dataset.arrow" )
dataset.map(cache_file_name=UpperCamelCase__ )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> int:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =str(tmp_path_factory.mktemp("data" ) / "dataset.sqlite" )
    with contextlib.closing(sqlite3.connect(UpperCamelCase__ ) ) as con:
        cur = con.cursor()
cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)" )
for item in DATA:
cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)" , tuple(item.values() ) )
con.commit()
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Dict =str(tmp_path_factory.mktemp("data" ) / "dataset.csv" )
with open(UpperCamelCase__ , "w" , newline="" ) as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> Dict:
"""simple docstring"""
lowerCamelCase__: Optional[Any] =str(tmp_path_factory.mktemp("data" ) / "dataset2.csv" )
with open(UpperCamelCase__ , "w" , newline="" ) as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a ) -> List[Any]:
"""simple docstring"""
    import bz2
lowerCamelCase__: Optional[int] =tmp_path_factory.mktemp("data" ) / "dataset.csv.bz2"
with open(UpperCamelCase__ , "rb" ) as f:
lowerCamelCase__: str =f.read()
# data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(UpperCamelCase__ , "wb" ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a , __a ) -> int:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(UpperCamelCase__ , "w" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path_factory.mktemp("data" ) / "dataset.csv.zip"
with zipfile.ZipFile(UpperCamelCase__ , "w" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(csv_path.replace(".csv" , ".CSV" ) ) )
f.write(UpperCamelCase__ , arcname=os.path.basename(csva_path.replace(".csv" , ".CSV" ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: List[Any] =tmp_path_factory.mktemp("data" ) / "dataset_with_dir.csv.zip"
with zipfile.ZipFile(UpperCamelCase__ , "w" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.join("main_dir" , os.path.basename(UpperCamelCase__ ) ) )
f.write(UpperCamelCase__ , arcname=os.path.join("main_dir" , os.path.basename(UpperCamelCase__ ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: List[str] =str(tmp_path_factory.mktemp("data" ) / "dataset.parquet" )
lowerCamelCase__: Optional[Any] =pa.schema(
{
"col_1": pa.string(),
"col_2": pa.intaa(),
"col_3": pa.floataa(),
} )
with open(UpperCamelCase__ , "wb" ) as f:
        writer = pq.ParquetWriter(UpperCamelCase__ , schema=UpperCamelCase__ )
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA ) )] for k in DATA[0]} , schema=UpperCamelCase__ )
        writer.write_table(pa_table )
writer.close()
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
lowerCamelCase__: Any ={"data": DATA}
with open(UpperCamelCase__ , "w" ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> Tuple:
"""simple docstring"""
lowerCamelCase__: List[Any] =str(tmp_path_factory.mktemp("data" ) / "dataset.json" )
lowerCamelCase__: str ={"data": DATA_DICT_OF_LISTS}
with open(UpperCamelCase__ , "w" ) as f:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> Dict:
"""simple docstring"""
lowerCamelCase__: Optional[Any] =str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl" )
with open(UpperCamelCase__ , "w" ) as f:
for item in DATA:
f.write(json.dumps(UpperCamelCase__ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> str:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =str(tmp_path_factory.mktemp("data" ) / "dataset2.jsonl" )
with open(UpperCamelCase__ , "w" ) as f:
for item in DATA:
f.write(json.dumps(UpperCamelCase__ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> Dict:
"""simple docstring"""
lowerCamelCase__: List[str] =str(tmp_path_factory.mktemp("data" ) / "dataset_312.jsonl" )
with open(UpperCamelCase__ , "w" ) as f:
for item in DATA_312:
f.write(json.dumps(UpperCamelCase__ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> Optional[int]:
"""simple docstring"""
lowerCamelCase__: Optional[int] =str(tmp_path_factory.mktemp("data" ) / "dataset-str.jsonl" )
with open(UpperCamelCase__ , "w" ) as f:
for item in DATA_STR:
f.write(json.dumps(UpperCamelCase__ ) + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a ) -> str:
"""simple docstring"""
import gzip
lowerCamelCase__: Optional[Any] =str(tmp_path_factory.mktemp("data" ) / "dataset.txt.gz" )
with open(UpperCamelCase__ , "rb" ) as orig_file:
with gzip.open(UpperCamelCase__ , "wb" ) as zipped_file:
zipped_file.writelines(UpperCamelCase__ )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a ) -> int:
"""simple docstring"""
import gzip
lowerCamelCase__: Tuple =str(tmp_path_factory.mktemp("data" ) / "dataset.jsonl.gz" )
with open(UpperCamelCase__ , "rb" ) as orig_file:
with gzip.open(UpperCamelCase__ , "wb" ) as zipped_file:
zipped_file.writelines(UpperCamelCase__ )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a , __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =tmp_path_factory.mktemp("data" ) / "dataset.jsonl.zip"
with zipfile.ZipFile(UpperCamelCase__ , "w" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a , __a , __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: List[Any] =tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.zip"
with zipfile.ZipFile(UpperCamelCase__ , "w" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.join("nested" , os.path.basename(UpperCamelCase__ ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a , __a ) -> str:
"""simple docstring"""
lowerCamelCase__: Any =tmp_path_factory.mktemp("data" ) / "dataset_with_dir.jsonl.zip"
with zipfile.ZipFile(UpperCamelCase__ , "w" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.join("main_dir" , os.path.basename(UpperCamelCase__ ) ) )
f.write(UpperCamelCase__ , arcname=os.path.join("main_dir" , os.path.basename(UpperCamelCase__ ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a , __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: List[Any] =tmp_path_factory.mktemp("data" ) / "dataset.jsonl.tar"
with tarfile.TarFile(UpperCamelCase__ , "w" ) as f:
f.add(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
f.add(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a , __a , __a ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__: Tuple =tmp_path_factory.mktemp("data" ) / "dataset_nested.jsonl.tar"
with tarfile.TarFile(UpperCamelCase__ , "w" ) as f:
f.add(UpperCamelCase__ , arcname=os.path.join("nested" , os.path.basename(UpperCamelCase__ ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> Any:
"""simple docstring"""
lowerCamelCase__: Optional[Any] =["0", "1", "2", "3"]
lowerCamelCase__: Any =str(tmp_path_factory.mktemp("data" ) / "dataset.txt" )
with open(UpperCamelCase__ , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__: List[str] =["0", "1", "2", "3"]
lowerCamelCase__: List[Any] =str(tmp_path_factory.mktemp("data" ) / "dataset2.txt" )
with open(UpperCamelCase__ , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Dict =["0", "1", "2", "3"]
lowerCamelCase__: Dict =tmp_path_factory.mktemp("data" ) / "dataset.abc"
with open(UpperCamelCase__ , "w" ) as f:
for item in data:
f.write(item + "\n" )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: List[Any] =tmp_path_factory.mktemp("data" ) / "dataset.text.zip"
with zipfile.ZipFile(UpperCamelCase__ , "w" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a , __a ) -> str:
"""simple docstring"""
lowerCamelCase__: Tuple =tmp_path_factory.mktemp("data" ) / "dataset_with_dir.text.zip"
with zipfile.ZipFile(UpperCamelCase__ , "w" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.join("main_dir" , os.path.basename(UpperCamelCase__ ) ) )
f.write(UpperCamelCase__ , arcname=os.path.join("main_dir" , os.path.basename(UpperCamelCase__ ) ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a , __a ) -> Union[str, Any]:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =tmp_path_factory.mktemp("data" ) / "dataset.ext.zip"
with zipfile.ZipFile(UpperCamelCase__ , "w" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename("unsupported.ext" ) )
f.write(UpperCamelCase__ , arcname=os.path.basename("unsupported_2.ext" ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: Optional[int] ="\n".join(["First", "Second\u2029with Unicode new line", "Third"] )
lowerCamelCase__: Dict =str(tmp_path_factory.mktemp("data" ) / "dataset_with_unicode_new_lines.txt" )
with open(UpperCamelCase__ , "w" , encoding="utf-8" ) as f:
f.write(UpperCamelCase__ )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
return os.path.join("tests" , "features" , "data" , "test_image_rgb.jpg" )
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( ) -> Optional[Any]:
"""simple docstring"""
return os.path.join("tests" , "features" , "data" , "test_audio_44100.wav" )
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a , __a ) -> Optional[Any]:
"""simple docstring"""
lowerCamelCase__: str =tmp_path_factory.mktemp("data" ) / "dataset.img.zip"
with zipfile.ZipFile(UpperCamelCase__ , "w" ) as f:
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ) )
f.write(UpperCamelCase__ , arcname=os.path.basename(UpperCamelCase__ ).replace(".jpg" , "2.jpg" ) )
return path
@pytest.fixture(scope="session" )
def lowerCAmelCase_ ( __a ) -> int:
"""simple docstring"""
lowerCamelCase__: List[str] =tmp_path_factory.mktemp("data_dir" )
(data_dir / "subdir").mkdir()
with open(data_dir / "subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / "subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden file
with open(data_dir / "subdir" / ".test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
# hidden directory
(data_dir / ".subdir").mkdir()
with open(data_dir / ".subdir" / "train.txt" , "w" ) as f:
f.write("foo\n" * 10 )
with open(data_dir / ".subdir" / "test.txt" , "w" ) as f:
f.write("bar\n" * 10 )
return data_dir
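These fixtures are session-scoped, so each temporary file or archive is built once per test run and shared across tests. A minimal consumer could look like the sketch below; the test name and assertion are ours, written only as an illustration of how the fixtures plug into pytest.

def test_zip_text_path_contains_both_files(zip_text_path):
    import zipfile
    # zip_text_path bundles dataset.txt and dataset2.txt under their basenames
    with zipfile.ZipFile(zip_text_path) as archive:
        assert sorted(archive.namelist()) == ["dataset.txt", "dataset2.txt"]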
| 10 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
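The block above is the standard lazy-module pattern: `_import_structure` maps submodule names to their public symbols, and `_LazyModule` replaces the package in `sys.modules` so the heavy torch imports only run when an attribute is first accessed. A dependency-free sketch of the same idea using PEP 562's module-level `__getattr__` (the module and attribute names here are stand-ins, not transformers internals):

# save as lazy_demo.py; `import lazy_demo; lazy_demo.sqrt` triggers the real import
import importlib

_import_structure = {"math": ["sqrt"], "json": ["dumps"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    if name in _attr_to_module:
        return getattr(importlib.import_module(_attr_to_module[name]), name)
    raise AttributeError(name)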
| 67 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_bloom''': ['''BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BloomConfig''', '''BloomOnnxConfig'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_bloom_fast'''] = ['''BloomTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bloom'''] = [
'''BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BloomForCausalLM''',
'''BloomModel''',
'''BloomPreTrainedModel''',
'''BloomForSequenceClassification''',
'''BloomForTokenClassification''',
'''BloomForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 322 | '''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
def copy_layers(src_layers: nn.ModuleList, dest_layers: nn.ModuleList, layers_to_copy: List[int]) -> None:
    layers_to_copy = nn.ModuleList([src_layers[i] for i in layers_to_copy])
    assert len(dest_layers) == len(layers_to_copy), f"""{len(dest_layers)} != {len(layers_to_copy)}"""
    dest_layers.load_state_dict(layers_to_copy.state_dict())
LAYERS_TO_COPY = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
LAYERS_TO_SUPERVISE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def pick_layers_to_copy(n_student: int, n_teacher: int) -> List[int]:
    try:
        val = LAYERS_TO_COPY[n_teacher][n_student]
        return val
    except KeyError:
        if n_student != n_teacher:
            warnings.warn(
                f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
                f""" {n_student}""" )
        return list(range(n_student))
def get_layers_to_supervise(n_student: int, n_teacher: int) -> List[int]:
    if n_student > n_teacher:
        raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def create_student_by_copying_alternating_layers(
    teacher: Union[str, PreTrainedModel],
    save_path: Union[str, Path] = "student",
    e: Union[int, None] = None,
    d: Union[int, None] = None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
__lowerCamelCase = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
AutoTokenizer.from_pretrained(UpperCamelCase__ ).save_pretrained(UpperCamelCase__ ) # purely for convenience
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ ).eval()
else:
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), f"""teacher must be a model or string got type {type(UpperCamelCase__ )}"""
__lowerCamelCase = teacher.config.to_diff_dict()
try:
__lowerCamelCase , __lowerCamelCase = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__lowerCamelCase = teacher_e
if d is None:
__lowerCamelCase = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
__lowerCamelCase , __lowerCamelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__lowerCamelCase , __lowerCamelCase = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__lowerCamelCase = teacher_e
if d is None:
__lowerCamelCase = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(UpperCamelCase__ )
# Copy weights
__lowerCamelCase = teacher.config_class(**UpperCamelCase__ )
__lowerCamelCase = AutoModelForSeqaSeqLM.from_config(UpperCamelCase__ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__lowerCamelCase = student.load_state_dict(teacher.state_dict() , strict=UpperCamelCase__ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__lowerCamelCase , __lowerCamelCase = list(range(UpperCamelCase__ ) ), list(range(UpperCamelCase__ ) )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
f""" {save_path}""" )
student.save_pretrained(UpperCamelCase__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__lowerCamelCase = pick_layers_to_copy(UpperCamelCase__ , UpperCamelCase__ )
if d_layers_to_copy is None:
__lowerCamelCase = pick_layers_to_copy(UpperCamelCase__ , UpperCamelCase__ )
try:
if hasattr(
UpperCamelCase__ , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , UpperCamelCase__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , UpperCamelCase__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , UpperCamelCase__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , UpperCamelCase__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , UpperCamelCase__ )
copy_layers(teacher.decoder.block , student.decoder.block , UpperCamelCase__ )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
__lowerCamelCase = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(UpperCamelCase__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
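The core trick in this script is that `copy_layers` slices a teacher `nn.ModuleList` and loads it into a smaller student list via `state_dict`. A toy, self-contained check of that step; the layer sizes are arbitrary, and the picked indices follow the 6-to-3 entry of LAYERS_TO_COPY above.

import torch
from torch import nn

teacher_layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(6)])
student_layers = nn.ModuleList([nn.Linear(4, 4) for _ in range(3)])
layers_to_copy = [0, 2, 5]  # keep first and last, roughly evenly spaced

copied = nn.ModuleList([teacher_layers[i] for i in layers_to_copy])
student_layers.load_state_dict(copied.state_dict())
assert torch.equal(student_layers[2].weight, teacher_layers[5].weight)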
| 67 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ = logging.get_logger(__name__)
class lowerCamelCase_ ( UpperCAmelCase__ ):
lowerCAmelCase__ = "timm_backbone"
def __init__( self : Optional[int] , _A : int=None , _A : Dict=3 , _A : Any=True , _A : List[str]=True , _A : Optional[Any]=None , **_A : Optional[int] , ):
'''simple docstring'''
super().__init__(**_A )
UpperCAmelCase__ : List[Any] = backbone
UpperCAmelCase__ : List[Any] = num_channels
UpperCAmelCase__ : Any = features_only
UpperCAmelCase__ : List[Any] = use_pretrained_backbone
UpperCAmelCase__ : Optional[int] = True
UpperCAmelCase__ : Union[str, Any] = out_indices if out_indices is not None else (-1,)
| 181 | '''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
TOKENIZER_CHECKPOINTS = ["gpt2"]
TINY_MODEL_CHECKPOINT = "gpt2"
if is_tf_available():
class a__ ( tf.Module ):
def __init__( self : str , a : Union[str, Any] ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = tokenizer
__lowerCamelCase = AutoConfig.from_pretrained(a )
__lowerCamelCase = TFGPTaLMHeadModel.from_config(a )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) )
def SCREAMING_SNAKE_CASE__ ( self : str , a : Tuple ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer(a )
__lowerCamelCase = tokenized['''input_ids'''].to_tensor()
__lowerCamelCase = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__lowerCamelCase = self.model(input_ids=a , attention_mask=a )['''logits''']
return outputs
@require_tf
@require_keras_nlp
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__lowerCamelCase = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
__lowerCamelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__lowerCamelCase = tokenizer([test_inputs] , return_tensors='''tf''' )
__lowerCamelCase = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__lowerCamelCase = python_outputs[key].numpy()
__lowerCamelCase = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(a , tf.intaa ) == tf_outputs_values ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase = tf.function(a )
for test_inputs in self.test_sentences:
__lowerCamelCase = tf.constant(a )
__lowerCamelCase = compiled_tokenizer(a )
__lowerCamelCase = tf_tokenizer(a )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase = ModelToSave(tokenizer=a )
__lowerCamelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCamelCase = model.serving(a ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__lowerCamelCase = Path(a ) / '''saved.model'''
tf.saved_model.save(a , a , signatures={'''serving_default''': model.serving} )
__lowerCamelCase = tf.saved_model.load(a )
__lowerCamelCase = loaded_model.signatures['''serving_default'''](a )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCamelCase = tf_tokenizer(a ) # Build model with some sample inputs
__lowerCamelCase = tf_tokenizer.get_config()
__lowerCamelCase = TFGPTaTokenizer.from_config(a )
__lowerCamelCase = model_from_config(a )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__lowerCamelCase = 12_31_23
for max_length in [3, 5, 10_24]:
__lowerCamelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCamelCase = tf_tokenizer(a , max_length=a )
__lowerCamelCase = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
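For reference, the in-graph tokenizer exercised by this test can also be used directly. This sketch mirrors the imports at the top of the test and assumes a transformers release with keras-nlp installed; the printed tensor is a ragged int tensor usable inside tf.function.

import tensorflow as tf
from transformers.models.gpt2 import TFGPT2Tokenizer

tf_tokenizer = TFGPT2Tokenizer.from_pretrained("gpt2")
outputs = tf_tokenizer(tf.constant(["This is a straightforward English test sentence."]))
print(outputs["input_ids"])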
| 67 | 0 |
'''simple docstring'''
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
"""simple docstring"""
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
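Usage of the helper above: exactly one of the three quantities must be zero, and the function solves for that one. A few spot checks, with values chosen only for illustration:

print(shear_stress(stress=25, tangential_force=100, area=0))     # ('area', 4.0)
print(shear_stress(stress=0, tangential_force=1600, area=200))   # ('stress', 8.0)
print(shear_stress(stress=1000, tangential_force=0, area=1200))  # ('tangential_force', 1200000.0)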
| 75 | '''simple docstring'''
def actual_power(a: int, b: int) -> int:
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
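A couple of quick checks of the divide-and-conquer exponentiation above; negative exponents route through the reciprocal branch:

assert power(2, 10) == 1024
assert power(-2, -3) == -0.125
assert actual_power(3, 0) == 1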
| 67 | 0 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__UpperCamelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCamelCase : int = {
'google/umt5-small': 'https://huggingface.co/google/umt5-small/resolve/main/config.json',
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class lowercase__ ( UpperCAmelCase__):
UpperCamelCase_ = "umt5"
UpperCamelCase_ = ["past_key_values"]
def __init__( self : Optional[Any] , UpperCamelCase__ : Optional[Any]=25_0112 , UpperCamelCase__ : str=512 , UpperCamelCase__ : Union[str, Any]=64 , UpperCamelCase__ : Union[str, Any]=1024 , UpperCamelCase__ : Dict=8 , UpperCamelCase__ : Any=None , UpperCamelCase__ : str=6 , UpperCamelCase__ : Optional[int]=32 , UpperCamelCase__ : List[str]=128 , UpperCamelCase__ : Optional[int]=0.1 , UpperCamelCase__ : Any=1E-6 , UpperCamelCase__ : List[Any]=1.0 , UpperCamelCase__ : int="gated-gelu" , UpperCamelCase__ : str=True , UpperCamelCase__ : Dict=True , UpperCamelCase__ : Optional[int]="T5Tokenizer" , UpperCamelCase__ : List[Any]=True , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Any=1 , UpperCamelCase__ : Union[str, Any]=0 , **UpperCamelCase__ : str , ):
'''simple docstring'''
super().__init__(
is_encoder_decoder=UpperCamelCase__ , tokenizer_class=UpperCamelCase__ , tie_word_embeddings=UpperCamelCase__ , pad_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , decoder_start_token_id=UpperCamelCase__ , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : List[Any] = vocab_size
SCREAMING_SNAKE_CASE : str = d_model
SCREAMING_SNAKE_CASE : Optional[Any] = d_kv
SCREAMING_SNAKE_CASE : Any = d_ff
SCREAMING_SNAKE_CASE : int = num_layers
SCREAMING_SNAKE_CASE : Optional[int] = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
SCREAMING_SNAKE_CASE : Tuple = num_heads
SCREAMING_SNAKE_CASE : int = relative_attention_num_buckets
SCREAMING_SNAKE_CASE : Dict = relative_attention_max_distance
SCREAMING_SNAKE_CASE : Optional[int] = dropout_rate
SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon
SCREAMING_SNAKE_CASE : List[Any] = initializer_factor
SCREAMING_SNAKE_CASE : List[Any] = feed_forward_proj
SCREAMING_SNAKE_CASE : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE : Dict = self.feed_forward_proj.split('''-''' )
SCREAMING_SNAKE_CASE : Any = act_info[-1]
SCREAMING_SNAKE_CASE : List[str] = act_info[0] == '''gated'''
if len(UpperCamelCase__ ) > 1 and act_info[0] != "gated" or len(UpperCamelCase__ ) > 2:
raise ValueError(
f"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."""
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
if feed_forward_proj == "gated-gelu":
SCREAMING_SNAKE_CASE : Any = '''gelu_new'''
@property
def __A ( self : List[str] ):
'''simple docstring'''
return self.d_model
@property
def __A ( self : Union[str, Any] ):
'''simple docstring'''
return self.num_heads
@property
def __A ( self : Optional[Any] ):
'''simple docstring'''
return self.num_layers
class lowercase__ ( UpperCAmelCase__):
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def __A ( self : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
SCREAMING_SNAKE_CASE : List[str] = '''past_encoder_sequence + sequence'''
SCREAMING_SNAKE_CASE : List[Any] = {0: '''batch'''}
SCREAMING_SNAKE_CASE : Tuple = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
SCREAMING_SNAKE_CASE : int = {0: '''batch''', 1: '''decoder_sequence'''}
SCREAMING_SNAKE_CASE : Any = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(UpperCamelCase__ , direction='''inputs''' )
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def __A ( self : Dict ):
'''simple docstring'''
return 13
@property
def __A ( self : Tuple ):
'''simple docstring'''
return 5E-4
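One behavioral detail worth calling out: the constructor splits `feed_forward_proj` on "-" to derive the dense activation and the gated flag, then maps "gated-gelu" to the "gelu_new" kernel. A quick check, assuming a transformers release that ships UMT5:

from transformers import UMT5Config

config = UMT5Config(feed_forward_proj="gated-gelu")
print(config.dense_act_fn, config.is_gated_act)  # gelu_new True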
| 182 | '''simple docstring'''
import logging
import os
from .state import PartialState
class a__ ( logging.LoggerAdapter ):
@staticmethod
def SCREAMING_SNAKE_CASE__ ( a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def SCREAMING_SNAKE_CASE__ ( self : int , a : Optional[int] , a : str , *a : Optional[int] , **a : List[Any] ):
"""simple docstring"""
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
__lowerCamelCase = kwargs.pop('''main_process_only''' , a )
__lowerCamelCase = kwargs.pop('''in_order''' , a )
if self.isEnabledFor(a ):
if self._should_log(a ):
__lowerCamelCase , __lowerCamelCase = self.process(a , a )
self.logger.log(a , a , *a , **a )
elif in_order:
__lowerCamelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__lowerCamelCase , __lowerCamelCase = self.process(a , a )
self.logger.log(a , a , *a , **a )
state.wait_for_everyone()
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = None ) -> Optional[int]:
if log_level is None:
__lowerCamelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , UpperCamelCase__ )
__lowerCamelCase = logging.getLogger(UpperCamelCase__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(UpperCamelCase__ , {} )
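Typical use, following accelerate's documented pattern: the shared state must be initialized (for example by constructing an Accelerator) before the adapter will log.

from accelerate import Accelerator
from accelerate.logging import get_logger

accelerator = Accelerator()  # initializes PartialState
logger = get_logger(__name__, log_level="INFO")
logger.info("printed once, on the main process")
logger.info("printed on every process, in launch order", main_process_only=False, in_order=True)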
| 67 | 0 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
a = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
a = '</w>'
a = '@@ '
def lowercase (snake_case__ : Any ) -> str:
'''simple docstring'''
lowerCAmelCase = set()
lowerCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
lowerCAmelCase = char
return pairs
# Speech2Text2 has no max input length
a = {'facebook/s2t-wav2vec2-large-en-de': 1_0_2_4}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
_a = VOCAB_FILES_NAMES
_a = PRETRAINED_VOCAB_FILES_MAP
_a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_a = ["input_ids", "attention_mask"]
def __init__( self : int , lowerCAmelCase : List[str] , lowerCAmelCase : int="<s>" , lowerCAmelCase : Any="<pad>" , lowerCAmelCase : Union[str, Any]="</s>" , lowerCAmelCase : Optional[Any]="<unk>" , lowerCAmelCase : List[Any]=False , lowerCAmelCase : Tuple=None , **lowerCAmelCase : str , ):
super().__init__(
unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , do_lower_case=lowerCAmelCase , **lowerCAmelCase , )
lowerCAmelCase = do_lower_case
with open(lowerCAmelCase , encoding="""utf-8""" ) as vocab_handle:
lowerCAmelCase = json.load(lowerCAmelCase )
lowerCAmelCase = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f'''No merges files provided. {self.__class__.__name__} can only be used for decoding.''' )
lowerCAmelCase = None
lowerCAmelCase = None
else:
with open(lowerCAmelCase , encoding="""utf-8""" ) as merges_handle:
lowerCAmelCase = merges_handle.read().split("""\n""" )[:-1]
lowerCAmelCase = [tuple(merge.split()[:2] ) for merge in merges]
lowerCAmelCase = dict(zip(lowerCAmelCase , range(len(lowerCAmelCase ) ) ) )
lowerCAmelCase = {}
@property
def __lowercase ( self : int ):
return len(self.decoder )
def __lowercase ( self : List[Any] ):
return dict(self.encoder , **self.added_tokens_encoder )
def __lowercase ( self : Any , lowerCAmelCase : List[str] ):
lowerCAmelCase = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
lowerCAmelCase = get_pairs(lowerCAmelCase )
if not pairs:
return token
while True:
lowerCAmelCase = min(lowerCAmelCase , key=lambda lowerCAmelCase : self.bpe_ranks.get(lowerCAmelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
lowerCAmelCase , lowerCAmelCase = bigram
lowerCAmelCase = []
lowerCAmelCase = 0
while i < len(lowerCAmelCase ):
try:
lowerCAmelCase = word.index(lowerCAmelCase , lowerCAmelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
lowerCAmelCase = j
if word[i] == first and i < len(lowerCAmelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
lowerCAmelCase = tuple(lowerCAmelCase )
lowerCAmelCase = new_word
if len(lowerCAmelCase ) == 1:
break
else:
lowerCAmelCase = get_pairs(lowerCAmelCase )
lowerCAmelCase = """ """.join(lowerCAmelCase )
if word == "\n " + BPE_TOKEN_MERGES:
lowerCAmelCase = """\n""" + BPE_TOKEN_MERGES
if word.endswith(lowerCAmelCase ):
lowerCAmelCase = word.replace(lowerCAmelCase , """""" )
lowerCAmelCase = word.replace(""" """ , lowerCAmelCase )
lowerCAmelCase = word
return word
def __lowercase ( self : Optional[Any] , lowerCAmelCase : Any ):
if self.bpe_ranks is None:
raise ValueError(
"""This tokenizer was instantiated without a `merges.txt` file, so"""
""" that it can only be used for decoding, not for encoding."""
"""Make sure to provide `merges.txt` file at instantiation to enable """
"""encoding.""" )
if self.do_lower_case:
lowerCAmelCase = text.lower()
lowerCAmelCase = text.split()
lowerCAmelCase = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(lowerCAmelCase ).split(""" """ ) ) )
return split_tokens
def __lowercase ( self : List[Any] , lowerCAmelCase : str ):
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token ) )
def __lowercase ( self : Optional[Any] , lowerCAmelCase : int ):
lowerCAmelCase = self.decoder.get(lowerCAmelCase , self.unk_token )
return result
def __lowercase ( self : Union[str, Any] , lowerCAmelCase : List[str] ):
lowerCAmelCase = """ """.join(lowerCAmelCase )
# make sure @@ tokens are concatenated
lowerCAmelCase = """""".join(string.split(lowerCAmelCase ) )
return string
def __lowercase ( self : List[Any] , lowerCAmelCase : str , lowerCAmelCase : Optional[str] = None ):
if not os.path.isdir(lowerCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase = os.path.join(
lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
lowerCAmelCase = os.path.join(
lowerCAmelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCAmelCase , ensure_ascii=lowerCAmelCase ) + """\n""" )
lowerCAmelCase = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(lowerCAmelCase , """w""" , encoding="""utf-8""" ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merges_file}: BPE merge indices are not consecutive.'''
""" Please check that the tokenizer is not corrupted!""" )
lowerCAmelCase = token_index
writer.write(""" """.join(lowerCAmelCase ) + """\n""" )
index += 1
return (vocab_file, merges_file)
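The BPE machinery above hinges on the `get_pairs` helper: it enumerates adjacent symbol pairs, which the merge loop then ranks against `bpe_ranks`. A stand-alone trace of that helper's logic on a toy word:

word = ("l", "o", "w", "e", "r</w>")
pairs = set()
prev_char = word[0]
for char in word[1:]:
    pairs.add((prev_char, char))
    prev_char = char
print(sorted(pairs))  # [('e', 'r</w>'), ('l', 'o'), ('o', 'w'), ('w', 'e')]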
| 155 | '''simple docstring'''
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = {"(", "[", "{"}
    closed_brackets = {")", "]", "}"}
    open_to_closed = {"{": "}", "[": "]", "(": ")"}
    for char in s:
        if char in open_brackets:
            stack.append(char)
        elif char in closed_brackets and (
            not stack or open_to_closed[stack.pop()] != char
        ):
            return False
    return not stack
def main() -> None:
    s = input("Enter sequence of brackets: ")
    if is_balanced(s):
        print(s, "is balanced")
    else:
        print(s, "is not balanced")
if __name__ == "__main__":
main()
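Spot checks for the matcher:

assert is_balanced("{[()]}")
assert not is_balanced("{[(])}")
assert is_balanced("")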
| 67 | 0 |
def decimal_to_fraction(decimal) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        divisor, dividend = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(f"""{decimal_to_fraction(2) = }""")
print(f"""{decimal_to_fraction(89.0) = }""")
print(f"""{decimal_to_fraction("67") = }""")
print(f"""{decimal_to_fraction("45.0") = }""")
print(f"""{decimal_to_fraction(1.5) = }""")
print(f"""{decimal_to_fraction("6.25") = }""")
print(f"""{decimal_to_fraction("78td") = }""")
| 114 | '''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Dict ="M-CLIP"
def __init__( self : Tuple , a : Optional[int]=10_24 , a : Tuple=7_68 , **a : List[str] ):
"""simple docstring"""
__lowerCamelCase = transformerDimSize
__lowerCamelCase = imageDimSize
super().__init__(**a )
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[Any] =MCLIPConfig
def __init__( self : str , a : List[Any] , *a : Dict , **a : str ):
"""simple docstring"""
super().__init__(a , *a , **a )
__lowerCamelCase = XLMRobertaModel(a )
__lowerCamelCase = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : int , a : List[Any] ):
"""simple docstring"""
__lowerCamelCase = self.transformer(input_ids=a , attention_mask=a )[0]
__lowerCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(a ), embs
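The forward pass above mean-pools token embeddings with the attention mask so that padding positions do not dilute the sentence vector. A self-contained check of that pooling expression:

import torch

embs = torch.arange(12.0).reshape(1, 4, 3)  # (batch, seq_len, dim)
attention_mask = torch.tensor([[1, 1, 0, 0]])
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
print(pooled)  # tensor([[1.5, 2.5, 3.5]]): the mean of the two unmasked tokens only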
| 67 | 0 |
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
_SCREAMING_SNAKE_CASE = open # noqa: we just need to have a builtin inside this module to test it properly
| 343 | '''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 67 | 0 |
from __future__ import annotations
import requests
valid_terms = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """simple docstring"""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError
    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}
    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 218 | '''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def __lowerCAmelCase ( UpperCamelCase__ ) -> list[list[float]]:
__lowerCamelCase = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(UpperCamelCase__ ) == 2 and len(matrix[0] ) == 2 and len(matrix[1] ) == 2:
# Calculate the determinant of the matrix
__lowerCamelCase = float(
d(matrix[0][0] ) * d(matrix[1][1] ) - d(matrix[1][0] ) * d(matrix[0][1] ) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creates a copy of the matrix with swapped positions of the elements
__lowerCamelCase = [[0.0, 0.0], [0.0, 0.0]]
__lowerCamelCase , __lowerCamelCase = matrix[1][1], matrix[0][0]
__lowerCamelCase , __lowerCamelCase = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(UpperCamelCase__ ) ) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(UpperCamelCase__ ) == 3
and len(matrix[0] ) == 3
and len(matrix[1] ) == 3
and len(matrix[2] ) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
__lowerCamelCase = float(
(
(d(matrix[0][0] ) * d(matrix[1][1] ) * d(matrix[2][2] ))
+ (d(matrix[0][1] ) * d(matrix[1][2] ) * d(matrix[2][0] ))
+ (d(matrix[0][2] ) * d(matrix[1][0] ) * d(matrix[2][1] ))
)
- (
(d(matrix[0][2] ) * d(matrix[1][1] ) * d(matrix[2][0] ))
+ (d(matrix[0][1] ) * d(matrix[1][0] ) * d(matrix[2][2] ))
+ (d(matrix[0][0] ) * d(matrix[1][2] ) * d(matrix[2][1] ))
) )
if determinant == 0:
raise ValueError('''This matrix has no inverse.''' )
# Creating cofactor matrix
__lowerCamelCase = [
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
[d(0.0 ), d(0.0 ), d(0.0 )],
]
__lowerCamelCase = (d(matrix[1][1] ) * d(matrix[2][2] )) - (
d(matrix[1][2] ) * d(matrix[2][1] )
)
__lowerCamelCase = -(
(d(matrix[1][0] ) * d(matrix[2][2] )) - (d(matrix[1][2] ) * d(matrix[2][0] ))
)
__lowerCamelCase = (d(matrix[1][0] ) * d(matrix[2][1] )) - (
d(matrix[1][1] ) * d(matrix[2][0] )
)
__lowerCamelCase = -(
(d(matrix[0][1] ) * d(matrix[2][2] )) - (d(matrix[0][2] ) * d(matrix[2][1] ))
)
__lowerCamelCase = (d(matrix[0][0] ) * d(matrix[2][2] )) - (
d(matrix[0][2] ) * d(matrix[2][0] )
)
__lowerCamelCase = -(
(d(matrix[0][0] ) * d(matrix[2][1] )) - (d(matrix[0][1] ) * d(matrix[2][0] ))
)
__lowerCamelCase = (d(matrix[0][1] ) * d(matrix[1][2] )) - (
d(matrix[0][2] ) * d(matrix[1][1] )
)
__lowerCamelCase = -(
(d(matrix[0][0] ) * d(matrix[1][2] )) - (d(matrix[0][2] ) * d(matrix[1][0] ))
)
__lowerCamelCase = (d(matrix[0][0] ) * d(matrix[1][1] )) - (
d(matrix[0][1] ) * d(matrix[1][0] )
)
# Transpose the cofactor matrix (Adjoint matrix)
__lowerCamelCase = array(UpperCamelCase__ )
for i in range(3 ):
for j in range(3 ):
__lowerCamelCase = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
__lowerCamelCase = array(UpperCamelCase__ )
for i in range(3 ):
for j in range(3 ):
inverse_matrix[i][j] /= d(UpperCamelCase__ )
# Calculate the inverse of the matrix
return [[float(d(UpperCamelCase__ ) ) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError('''Please provide a matrix of size 2x2 or 3x3.''' )
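A quick numeric check of the 2x2 branch above, recomputed inline since the function name is obfuscated in this dump: swap the diagonal, negate the off-diagonal, divide by the determinant.

matrix = [[2.0, 5.0], [1.0, 3.0]]
det = matrix[0][0] * matrix[1][1] - matrix[1][0] * matrix[0][1]  # 1.0
inverse = [
    [matrix[1][1] / det, -matrix[0][1] / det],
    [-matrix[1][0] / det, matrix[0][0] / det],
]
print(inverse)  # [[3.0, -5.0], [-1.0, 2.0]]; multiplying back gives the identity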
| 67 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = "mra"
def __init__( self : List[str] , UpperCamelCase__ : int=5_0_2_6_5 , UpperCamelCase__ : Union[str, Any]=7_6_8 , UpperCamelCase__ : List[Any]=1_2 , UpperCamelCase__ : str=1_2 , UpperCamelCase__ : int=3_0_7_2 , UpperCamelCase__ : str="gelu" , UpperCamelCase__ : List[str]=0.1 , UpperCamelCase__ : Union[str, Any]=0.1 , UpperCamelCase__ : Optional[int]=5_1_2 , UpperCamelCase__ : Union[str, Any]=1 , UpperCamelCase__ : List[Any]=0.0_2 , UpperCamelCase__ : str=1E-5 , UpperCamelCase__ : int="absolute" , UpperCamelCase__ : int=4 , UpperCamelCase__ : str="full" , UpperCamelCase__ : Optional[Any]=0 , UpperCamelCase__ : Any=0 , UpperCamelCase__ : List[str]=1 , UpperCamelCase__ : Union[str, Any]=0 , UpperCamelCase__ : Union[str, Any]=2 , **UpperCamelCase__ : Optional[Any] , ):
"""simple docstring"""
super().__init__(pad_token_id=UpperCamelCase__ , bos_token_id=UpperCamelCase__ , eos_token_id=UpperCamelCase__ , **UpperCamelCase__ )
UpperCamelCase = vocab_size
UpperCamelCase = max_position_embeddings
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_act
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = initializer_range
UpperCamelCase = type_vocab_size
UpperCamelCase = layer_norm_eps
UpperCamelCase = position_embedding_type
UpperCamelCase = block_per_row
UpperCamelCase = approx_mode
UpperCamelCase = initial_prior_first_n_blocks
UpperCamelCase = initial_prior_diagonal_n_blocks
| 28 | '''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class a__ :
lowerCamelCase : List[str] =list_field(
default=[] , metadata={
"help": (
"Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
" of all available models"
)
} , )
lowerCamelCase : List[int] =list_field(
default=[8] , metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} )
lowerCamelCase : List[int] =list_field(
default=[8, 3_2, 1_2_8, 5_1_2] , metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Use FP16 to accelerate inference."} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Benchmark training of model"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Verbose memory tracing"} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."} , )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
} , )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Trace memory line by line"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Save result to a CSV file"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Save all print statements in a log file"} )
lowerCamelCase : bool =field(default=UpperCAmelCase__ , metadata={"help": "Whether to print environment information"} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": (
"Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
" for debugging / testing and on TPU."
)
} , )
lowerCamelCase : str =field(
default=F'''inference_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv."} , )
lowerCamelCase : str =field(
default=F'''inference_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv."} , )
lowerCamelCase : str =field(
default=F'''train_time_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving time results to csv for training."} , )
lowerCamelCase : str =field(
default=F'''train_memory_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving memory results to csv for training."} , )
lowerCamelCase : str =field(
default=F'''env_info_{round(time() )}.csv''' , metadata={"help": "CSV filename used if saving environment information."} , )
lowerCamelCase : str =field(
default=F'''log_{round(time() )}.csv''' , metadata={"help": "Log filename used if print statements are saved in log."} , )
lowerCamelCase : int =field(default=3 , metadata={"help": "Times an experiment will be run."} )
lowerCamelCase : bool =field(
default=UpperCAmelCase__ , metadata={
"help": (
"Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
" model weights."
)
} , )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
warnings.warn(
f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , a , )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
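These dataclass fields double as CLI flags when fed through `HfArgumentParser`; for a quick programmatic run, the concrete PyTorch subclass shipped with transformers can also be constructed directly. A hedged sketch (it emits the deprecation warning defined above):

from transformers import PyTorchBenchmarkArguments

args = PyTorchBenchmarkArguments(
    models=["bert-base-cased"], batch_sizes=[8], sequence_lengths=[32]
)
print(args.model_names)  # ['bert-base-cased'], via the validating property above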
| 67 | 0 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=512,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def __lowercase ( lowerCamelCase : List[str] ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"could not parse string as bool {string}" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
a_ = parser.parse_args()
a_ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 175 | '''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCAmelCase =None
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase ={
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase ={
"moussaKam/mbarthez": 1_0_2_4,
"moussaKam/barthez": 1_0_2_4,
"moussaKam/barthez-orangesum-title": 1_0_2_4,
}
__UpperCAmelCase ="▁"
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[int] =VOCAB_FILES_NAMES
lowerCamelCase : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] =["input_ids", "attention_mask"]
lowerCamelCase : Union[str, Any] =BarthezTokenizer
def __init__( self : Optional[Any] , a : Dict=None , a : str=None , a : List[Any]="<s>" , a : Optional[int]="</s>" , a : List[str]="</s>" , a : Tuple="<s>" , a : str="<unk>" , a : Any="<pad>" , a : Union[str, Any]="<mask>" , **a : Union[str, Any] , ):
"""simple docstring"""
__lowerCamelCase = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , cls_token=a , pad_token=a , mask_token=a , **a , )
__lowerCamelCase = vocab_file
__lowerCamelCase = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
__lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : List[int] , a : Optional[List[int]] = None ):
"""simple docstring"""
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : str , a : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase = os.path.join(
a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,)
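For sequence pairs, `build_inputs_with_special_tokens` above produces the BART-style layout `<s> A </s></s> B </s>`. A toy trace with made-up ids (0 standing in for cls, 2 for sep):

cls, sep = [0], [2]
token_ids_0, token_ids_1 = [10, 11], [20]
print(cls + token_ids_0 + sep + sep + token_ids_1 + sep)  # [0, 10, 11, 2, 2, 20, 2]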
| 67 | 0 |
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNetaDConditionModel
def lowerCAmelCase_ ( ) -> List[Any]:
"""simple docstring"""
lowerCamelCase__: Dict =argparse.ArgumentParser()
parser.add_argument(
"-m" , "--pretrained_model_name_or_path" , type=UpperCamelCase__ , default=UpperCamelCase__ , required=UpperCamelCase__ , help="Path to pretrained model or model identifier from huggingface.co/models." , )
parser.add_argument(
"-c" , "--caption" , type=UpperCamelCase__ , default="robotic cat with wings" , help="Text used to generate images." , )
parser.add_argument(
"-n" , "--images_num" , type=UpperCamelCase__ , default=4 , help="How much images to generate." , )
parser.add_argument(
"-s" , "--seed" , type=UpperCamelCase__ , default=42 , help="Seed for random process." , )
parser.add_argument(
"-ci" , "--cuda_id" , type=UpperCamelCase__ , default=0 , help="cuda_id." , )
lowerCamelCase__: str =parser.parse_args()
return args
def lowerCAmelCase_ ( __a , __a , __a ) -> Dict:
"""simple docstring"""
if not len(UpperCamelCase__ ) == rows * cols:
raise ValueError("The specified number of rows and columns are not correct." )
lowerCamelCase__ , lowerCamelCase__: List[Any] =imgs[0].size
lowerCamelCase__: Dict =Image.new("RGB" , size=(cols * w, rows * h) )
lowerCamelCase__ , lowerCamelCase__: Any =grid.size
for i, img in enumerate(UpperCamelCase__ ):
grid.paste(UpperCamelCase__ , box=(i % cols * w, i // cols * h) )
return grid
def lowerCAmelCase_ ( __a , __a="robotic cat with wings" , __a=7.5 , __a=50 , __a=1 , __a=42 , ) -> List[str]:
"""simple docstring"""
lowerCamelCase__: Union[str, Any] =torch.Generator(pipeline.device ).manual_seed(UpperCamelCase__ )
lowerCamelCase__: Optional[Any] =pipeline(
UpperCamelCase__ , guidance_scale=UpperCamelCase__ , num_inference_steps=UpperCamelCase__ , generator=UpperCamelCase__ , num_images_per_prompt=UpperCamelCase__ , ).images
lowerCamelCase__: int =int(math.sqrt(UpperCamelCase__ ) )
lowerCamelCase__: int =image_grid(UpperCamelCase__ , rows=_rows , cols=num_images_per_prompt // _rows )
return grid, images
args = parse_args()

# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")

pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
# Disable the safety checker so every generated image is returned
pipeline.safety_checker = lambda images, clip_input: (images, False)

if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    # Load the quantized UNet produced by Intel Neural Compressor, if present
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)

grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
    image.save(os.path.join(dirname, "{}.png".format(idx + 1)))
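# Example invocation (my addition; the script filename is hypothetical, the flags
# mirror the argparse setup above):
#   python generate_images.py -m ./stable-diffusion-model-dir -c "robotic cat with wings" -n 4 -s 42
# The grid is saved inside the model directory and the individual images under
# <model_dir>/robotic_cat_with_wings/1.png, 2.png, ...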
| 10 | '''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    """Sum Euler's totient phi(n) for 2 <= n <= limit (Project Euler 72)."""
    # Sieve of Eratosthenes over the odd numbers, plus 2
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    # phi(n) = n * prod(1 - 1/p) over the primes p dividing n
    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
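# Quick sanity check (my addition, using the Project Euler 72 statement):
# phi(2) + phi(3) + ... + phi(8) = 1 + 2 + 2 + 4 + 2 + 6 + 4 = 21 reduced
# proper fractions with denominator <= 8.
assert solution(8) == 21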
if __name__ == "__main__":
print(f'{solution() = }')
| 67 | 0 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def get_swin_config(model_name: str) -> SwinConfig:
    """Build a SwinConfig matching the named SimMIM checkpoint."""
    config = SwinConfig(image_size=192)

    if "base" in model_name:
        window_size = 6
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    elif "large" in model_name:
        window_size = 12
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
    else:
        raise ValueError("Model not supported, only supports base and large variants")

    config.window_size = window_size
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads

    return config
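# Quick illustration (my addition): get_swin_config("swin-base-simmim-window6-192")
# yields window_size=6, embed_dim=128, depths=(2, 2, 18, 2), num_heads=(4, 8, 16, 32).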
def rename_key(name: str) -> str:
    """Map an original SimMIM parameter name to the HF Swin naming scheme."""
    if "encoder.mask_token" in name:
        name = name.replace("encoder.mask_token", "embeddings.mask_token")
    if "encoder.patch_embed.proj" in name:
        name = name.replace("encoder.patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "encoder.patch_embed.norm" in name:
        name = name.replace("encoder.patch_embed.norm", "embeddings.norm")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")

    if name == "encoder.norm.weight":
        name = "layernorm.weight"
    if name == "encoder.norm.bias":
        name = "layernorm.bias"

    if "decoder" in name:
        pass
    else:
        name = "swin." + name

    return name
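# Example mapping (my addition):
#   rename_key("encoder.layers.0.blocks.0.attn.proj.weight")
#   -> "swin.encoder.layers.0.blocks.0.attention.output.dense.weight"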
def convert_state_dict(orig_state_dict: dict, model: SwinForMaskedImageModeling) -> dict:
    """Split fused qkv tensors and rename all keys to the HF format."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn_mask" in key:
            pass
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[2])
            block_num = int(key_split[4])
            dim = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            # Target key names follow the standard HF Swin attention naming
            prefix = f"swin.encoder.layers.{layer_num}.blocks.{block_num}.attention.self"
            if "weight" in key:
                orig_state_dict[f"{prefix}.query.weight"] = val[:dim, :]
                orig_state_dict[f"{prefix}.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"{prefix}.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"{prefix}.query.bias"] = val[:dim]
                orig_state_dict[f"{prefix}.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"{prefix}.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
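# Shape sketch (my addition): a fused qkv weight has shape (3 * dim, dim), so the
# row slices val[:dim], val[dim : dim * 2] and val[-dim:] recover the query, key
# and value projections; fused biases of shape (3 * dim,) split the same way.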
def convert_swin_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub) -> None:
    """Convert an original SimMIM checkpoint, sanity-check it, and optionally save/push it."""
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]

    config = get_swin_config(model_name)
    model = SwinForMaskedImageModeling(config)
    model.eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = ViTImageProcessor(size={"height": 192, "width": 192})
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    with torch.no_grad():
        outputs = model(**inputs)

    print(outputs.keys())
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

        print(f"Saving image processor to {pytorch_dump_folder_path}")
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and image processor for {model_name} to hub")
        model.push_to_hub(f"microsoft/{model_name}")
        image_processor.push_to_hub(f"microsoft/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 322 | '''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class a__ :
def __init__( self : Union[str, Any] , a : Union[str, Any] , a : Tuple=13 , a : Optional[Any]=7 , a : List[Any]=True , a : Optional[Any]=True , a : Any=True , a : Union[str, Any]=99 , a : Any=32 , a : int=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Optional[Any]="gelu" , a : Union[str, Any]=0.1 , a : Any=0.1 , a : Optional[int]=5_12 , a : int=16 , a : Optional[Any]=2 , a : Union[str, Any]=0.02 , a : Any=3 , a : Dict=4 , a : Any=None , ):
"""simple docstring"""
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = self.vocab_size - 1
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : Dict , a : List[str] , a : Tuple , a : List[Any] , *a : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTModel(config=a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , head_mask=a )
__lowerCamelCase = model(a , token_type_ids=a )
__lowerCamelCase = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Union[str, Any] , a : Dict , a : Union[str, Any] , a : Tuple , *a : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTLMHeadModel(a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Tuple , a : Optional[int] , a : Union[str, Any] , a : Optional[Any] , *a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTDoubleHeadsModel(a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : int , a : Dict , a : Optional[Any] , a : str , *a : int ):
"""simple docstring"""
__lowerCamelCase = self.num_labels
__lowerCamelCase = OpenAIGPTForSequenceClassification(a )
model.to(a )
model.eval()
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.prepare_config_and_inputs()
(
(
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) , (
__lowerCamelCase
) ,
) = config_and_inputs
__lowerCamelCase = {
'''input_ids''': input_ids,
'''token_type_ids''': token_type_ids,
'''head_mask''': head_mask,
}
return config, inputs_dict
@require_torch
class a__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : List[str] =(
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCamelCase : str =(
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCamelCase : Optional[int] =(
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : Tuple , a : Optional[int] , a : int , a : str , a : Any ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : int , a : Optional[int] , a : str=False ):
"""simple docstring"""
__lowerCamelCase = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a , )
__lowerCamelCase = inputs_dict['''labels''']
__lowerCamelCase = inputs_dict['''labels''']
__lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a , )
__lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTModelTester(self )
__lowerCamelCase = ConfigTester(self , config_class=a , n_embd=37 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*a )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
__lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*a )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCamelCase = OpenAIGPTModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_torch
class a__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
model.to(a )
__lowerCamelCase = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=a ) # the president is
__lowerCamelCase = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
__lowerCamelCase = model.generate(a , do_sample=a )
self.assertListEqual(output_ids[0].tolist() , a )
| 67 | 0 |
'''simple docstring'''
from __future__ import annotations
from random import random
from typing import Generic, TypeVar
KT = TypeVar("KT")
VT = TypeVar("VT")
class Node(Generic[KT, VT]):
    def __init__(self, key: KT | str = "root", value: VT | None = None):
        self.key = key
        self.value = value
        # forward[i] is the next node on level i
        self.forward: list[Node[KT, VT]] = []

    def __repr__(self) -> str:
        return f"Node({self.key}: {self.value})"

    @property
    def level(self) -> int:
        """Number of levels this node spans."""
        return len(self.forward)
class SkipList(Generic[KT, VT]):
    def __init__(self, p: float = 0.5, max_level: int = 16):
        self.head: Node[KT, VT] = Node[KT, VT]()
        self.level = 0
        self.p = p
        self.max_level = max_level

    def __str__(self) -> str:
        items = list(self)

        if len(items) == 0:
            return f"SkipList(level={self.level})"

        label_size = max((len(str(item)) for item in items), default=4)
        label_size = max(label_size, 4) + 4

        node = self.head
        lines = []

        forwards = node.forward.copy()
        lines.append(f"[{node.key}]".ljust(label_size, "-") + "* " * len(forwards))
        lines.append(" " * label_size + "| " * len(forwards))

        while len(node.forward) != 0:
            node = node.forward[0]

            lines.append(
                f"[{node.key}]".ljust(label_size, "-")
                + " ".join(str(n.key) if n.key == node.key else "|" for n in forwards)
            )
            lines.append(" " * label_size + "| " * len(forwards))
            forwards = node.forward

        lines.append("None".ljust(label_size) + "* " * len(forwards))
        return f"SkipList(level={self.level})\n" + "\n".join(lines)

    def __iter__(self):
        node = self.head

        while len(node.forward) != 0:
            yield node.forward[0].key
            node = node.forward[0]

    def random_level(self) -> int:
        """Draw a level from a geometric distribution, capped at max_level."""
        level = 1
        while random() < self.p and level < self.max_level:
            level += 1

        return level
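    # Level sketch (my addition): with p = 0.5 a node reaches level >= k with
    # probability 0.5 ** (k - 1), so the expected level is 1 / (1 - p) = 2 and
    # searches take O(log n) comparisons in expectation.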
    def _locate_node(self, key) -> tuple[Node[KT, VT] | None, list[Node[KT, VT]]]:
        """
        :param key: Searched key.
        :return: Tuple with the searched node (or None if the key is absent) and
                 the list of nodes that refer (or should refer) to the given node.
        """
        # Nodes which refer or should refer to the output node
        update_vector = []

        node = self.head

        for i in reversed(range(self.level)):
            # i < node.level - When node level is lesser than `i` decrement `i`.
            # node.forward[i].key < key - Jumping to node with key value higher
            #                             or equal to searched key would result
            #                             in skipping searched key.
            while i < node.level and node.forward[i].key < key:
                node = node.forward[i]
            # Each leftmost node (relative to searched node) will potentially have to
            # be updated.
            update_vector.append(node)

        update_vector.reverse()  # Note that we were inserting values in reverse order.

        # len(node.forward) != 0 - If current node doesn't contain any further
        #                          references then searched key is not present.
        # node.forward[0].key == key - Next node key should be equal to search key
        #                              if key is present.
        if len(node.forward) != 0 and node.forward[0].key == key:
            return node.forward[0], update_vector
        else:
            return None, update_vector

    def delete(self, key: KT):
        node, update_vector = self._locate_node(key)

        if node is not None:
            for i, update_node in enumerate(update_vector):
                # Remove or replace all references to removed node.
                if update_node.level > i and update_node.forward[i].key == key:
                    if node.level > i:
                        update_node.forward[i] = node.forward[i]
                    else:
                        update_node.forward = update_node.forward[:i]

    def insert(self, key: KT, value: VT):
        node, update_vector = self._locate_node(key)
        if node is not None:
            node.value = value
        else:
            level = self.random_level()

            if level > self.level:
                # After level increase we have to add additional nodes to head.
                for _ in range(self.level - 1, level):
                    update_vector.append(self.head)
                self.level = level

            new_node = Node(key, value)

            for i, update_node in enumerate(update_vector[:level]):
                # Change references to pass through new node.
                if update_node.level > i:
                    new_node.forward.append(update_node.forward[i])

                if update_node.level < i + 1:
                    update_node.forward.append(new_node)
                else:
                    update_node.forward[i] = new_node

    def find(self, key: VT):
        node, _ = self._locate_node(key)

        if node is not None:
            return node.value

        return None
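# Minimal usage sketch (my addition, mirroring the tests below):
#   sl = SkipList[str, int]()
#   sl.insert("a", 1)
#   sl.insert("b", 2)
#   assert sl.find("a") == 1 and list(sl) == ["a", "b"]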
def test_insert() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 3)
    skip_list.insert("Key2", 12)
    skip_list.insert("Key3", 41)
    skip_list.insert("Key4", -19)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    assert len(all_values) == 4
    assert all_values["Key1"] == 3
    assert all_values["Key2"] == 12
    assert all_values["Key3"] == 41
    assert all_values["Key4"] == -19


def test_insert_overrides_existing_value() -> None:
    skip_list = SkipList()
    skip_list.insert("Key1", 10)
    skip_list.insert("Key1", 12)

    skip_list.insert("Key5", 7)
    skip_list.insert("Key7", 10)
    skip_list.insert("Key10", 5)

    skip_list.insert("Key7", 7)
    skip_list.insert("Key5", 5)
    skip_list.insert("Key10", 10)

    node = skip_list.head
    all_values = {}
    while node.level != 0:
        node = node.forward[0]
        all_values[node.key] = node.value

    if len(all_values) != 4:
        print()
    assert len(all_values) == 4
    assert all_values["Key1"] == 12
    assert all_values["Key7"] == 7
    assert all_values["Key5"] == 5
    assert all_values["Key10"] == 10


def test_searching_empty_list_returns_none() -> None:
    skip_list = SkipList()
    assert skip_list.find("Some key") is None


def test_search() -> None:
    skip_list = SkipList()

    skip_list.insert("Key2", 20)
    assert skip_list.find("Key2") == 20

    skip_list.insert("Some Key", 10)
    skip_list.insert("Key2", 8)
    skip_list.insert("V", 13)

    assert skip_list.find("Y") is None
    assert skip_list.find("Key2") == 8
    assert skip_list.find("Some Key") == 10
    assert skip_list.find("V") == 13


def test_deleting_item_from_empty_list_do_nothing() -> None:
    skip_list = SkipList()
    skip_list.delete("Some key")

    assert len(skip_list.head.forward) == 0


def test_deleted_items_are_not_founded_by_find_method() -> None:
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    skip_list.delete("Key2")

    assert skip_list.find("V") is None
    assert skip_list.find("Key2") is None


def test_delete_removes_only_given_key() -> None:
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 14)
    skip_list.insert("Key2", 15)

    skip_list.delete("V")
    assert skip_list.find("V") is None
    assert skip_list.find("X") == 14
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("X")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") == 12
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key1")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") == 15

    skip_list.delete("Key2")
    assert skip_list.find("V") is None
    assert skip_list.find("X") is None
    assert skip_list.find("Key1") is None
    assert skip_list.find("Key2") is None


def test_delete_doesnt_leave_dead_nodes() -> None:
    skip_list = SkipList()

    skip_list.insert("Key1", 12)
    skip_list.insert("V", 13)
    skip_list.insert("X", 142)
    skip_list.insert("Key2", 15)

    skip_list.delete("X")

    def traverse_keys(node):
        yield node.key
        for forward_node in node.forward:
            yield from traverse_keys(forward_node)

    assert len(set(traverse_keys(skip_list.head))) == 4


def test_iter_always_yields_sorted_values() -> None:
    def is_sorted(lst):
        return all(next_item >= item for item, next_item in zip(lst, lst[1:]))

    skip_list = SkipList()
    for i in range(10):
        skip_list.insert(i, i)
    assert is_sorted(list(skip_list))
    skip_list.delete(5)
    skip_list.delete(8)
    skip_list.delete(2)
    assert is_sorted(list(skip_list))
    skip_list.insert(-12, -12)
    skip_list.insert(77, 77)
    assert is_sorted(list(skip_list))


def pytests() -> None:
    """
    Run the randomized tests; exercised via the doctest call below.
    >>> pytests()
    """
    for _ in range(100):
        # Repeat test 100 times due to the probabilistic nature of skip list
        # random values == random bugs
        test_insert()
        test_insert_overrides_existing_value()

        test_searching_empty_list_returns_none()
        test_search()

        test_deleting_item_from_empty_list_do_nothing()
        test_deleted_items_are_not_founded_by_find_method()
        test_delete_removes_only_given_key()
        test_delete_doesnt_leave_dead_nodes()

        test_iter_always_yields_sorted_values()


def main() -> None:
    skip_list = SkipList()
    skip_list.insert(2, "2")
    skip_list.insert(4, "4")
    skip_list.insert(6, "4")
    skip_list.insert(4, "5")
    skip_list.insert(8, "4")
    skip_list.insert(9, "4")

    skip_list.delete(4)

    print(skip_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 181 | '''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutXLMProcessor(ProcessorMixin):
    r"""
    Constructs a LayoutXLM processor which combines a LayoutLMv2 image processor
    and a LayoutXLM tokenizer into a single processor.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv2ImageProcessor"
    tokenizer_class = ("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes "
                "if you initialized the image processor with apply_ocr set to True."
            )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )
        if return_overflowing_tokens is True and return_offsets_mapping is False:
            raise ValueError("You cannot return overflowing tokens without returning the offsets mapping.")

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )

        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["image"] = images

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case of overflow, map each `input_ids` sample back to its image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forwards all arguments to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "image"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
| 67 | 0 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a_ : Optional[Any] = logging.getLogger(__name__)
@dataclass
class __UpperCamelCase :
lowercase : str =field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase : Optional[str] =field(
default=UpperCAmelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase : Optional[str] =field(
default='NER' , metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'} )
lowercase : Optional[str] =field(
default=UpperCAmelCase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowercase : bool =field(default=UpperCAmelCase__ , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase : Optional[str] =field(
default=UpperCAmelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __UpperCamelCase :
lowercase : str =field(
metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'} )
lowercase : Optional[str] =field(
default=UpperCAmelCase__ , metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} , )
lowercase : int =field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase : bool =field(
default=UpperCAmelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def a_ ( ) -> int:
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('''.json''' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
lowerCamelCase_ =import_module('''tasks''' )
try:
lowerCamelCase_ =getattr(UpperCamelCase__ , model_args.task_type )
lowerCamelCase_ =token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'''Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '''
F'''Available tasks classes are: {TokenClassificationTask.__subclasses__()}''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
lowerCamelCase_ =token_classification_task.get_labels(data_args.labels )
lowerCamelCase_ =dict(enumerate(UpperCamelCase__ ) )
lowerCamelCase_ =len(UpperCamelCase__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid={label: i for i, label in enumerate(UpperCamelCase__ )} , cache_dir=model_args.cache_dir , )
lowerCamelCase_ =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowerCamelCase_ =AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
# Get datasets
lowerCamelCase_ =(
TokenClassificationDataset(
token_classification_task=UpperCamelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCamelCase__ , labels=UpperCamelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowerCamelCase_ =(
TokenClassificationDataset(
token_classification_task=UpperCamelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCamelCase__ , labels=UpperCamelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(__snake_case : List[Any] , __snake_case : Union[str, Any] ) -> Tuple[List[int], List[int]]:
lowerCamelCase_ =np.argmax(UpperCamelCase__ , axis=2 )
lowerCamelCase_, lowerCamelCase_ =preds.shape
lowerCamelCase_ =[[] for _ in range(UpperCamelCase__ )]
lowerCamelCase_ =[[] for _ in range(UpperCamelCase__ )]
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(__snake_case : Dict ) -> Dict:
lowerCamelCase_, lowerCamelCase_ =align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(UpperCamelCase__ , UpperCamelCase__ ),
"precision": precision_score(UpperCamelCase__ , UpperCamelCase__ ),
"recall": recall_score(UpperCamelCase__ , UpperCamelCase__ ),
"f1": fa_score(UpperCamelCase__ , UpperCamelCase__ ),
}
# Data collator
lowerCamelCase_ =DataCollatorWithPadding(UpperCamelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowerCamelCase_ =Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , data_collator=UpperCamelCase__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCamelCase_ ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ =trainer.evaluate()
lowerCamelCase_ =os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_process_zero():
with open(UpperCamelCase__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , UpperCamelCase__ , UpperCamelCase__ )
writer.write('''%s = %s\n''' % (key, value) )
results.update(UpperCamelCase__ )
# Predict
if training_args.do_predict:
lowerCamelCase_ =TokenClassificationDataset(
token_classification_task=UpperCamelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCamelCase__ , labels=UpperCamelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =trainer.predict(UpperCamelCase__ )
lowerCamelCase_, lowerCamelCase_ =align_predictions(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase_ =os.path.join(training_args.output_dir , '''test_results.txt''' )
if trainer.is_world_process_zero():
with open(UpperCamelCase__ , '''w''' ) as writer:
for key, value in metrics.items():
logger.info(''' %s = %s''' , UpperCamelCase__ , UpperCamelCase__ )
writer.write('''%s = %s\n''' % (key, value) )
# Save predictions
lowerCamelCase_ =os.path.join(training_args.output_dir , '''test_predictions.txt''' )
if trainer.is_world_process_zero():
with open(UpperCamelCase__ , '''w''' ) as writer:
with open(os.path.join(data_args.data_dir , '''test.txt''' ) , '''r''' ) as f:
token_classification_task.write_predictions_to_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return results
def a_ ( __snake_case : Dict ) -> str:
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 75 | '''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)
class MultiControlNetModel(ModelMixin):
    r"""
    Wrapper that runs several `ControlNetModel`s and merges their residuals
    before they are fed to the UNet.
    """

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(self, sample: torch.FloatTensor, timestep: Union[torch.Tensor, float, int], encoder_hidden_states: torch.Tensor, controlnet_cond: List[torch.Tensor], conditioning_scale: List[float], class_labels: Optional[torch.Tensor] = None, timestep_cond: Optional[torch.Tensor] = None, attention_mask: Optional[torch.Tensor] = None, cross_attention_kwargs: Optional[Dict[str, Any]] = None, guess_mode: bool = False, return_dict: bool = True) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample, timestep, encoder_hidden_states, image, scale, class_labels, timestep_cond, attention_mask, cross_attention_kwargs, guess_mode, return_dict, )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample
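    # Merging sketch (my addition): with two controlnets and
    # conditioning_scale=[1.0, 0.5], the second net's down/mid residuals are
    # computed at half strength and then summed elementwise into the shared
    # residuals that the UNet consumes.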
    def save_pretrained(self, save_directory: Union[str, os.PathLike], is_main_process: bool = True, save_function: Callable = None, safe_serialization: bool = False, variant: Optional[str] = None, ):
        """
        Save each sub-ControlNet to its own sub-directory so the ensemble can be
        reloaded with `from_pretrained`.
        """
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save, is_main_process=is_main_process, save_function=save_function, safe_serialization=safe_serialization, variant=variant, )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
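# Directory layout sketch (my addition):
#   model.save_pretrained("./multi_controlnet")  ->  ./multi_controlnet, ./multi_controlnet_1, ...
#   MultiControlNetModel.from_pretrained("./multi_controlnet") keeps loading
#   ./multi_controlnet_{idx} until that directory no longer exists.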
| 67 | 0 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
__UpperCamelCase : Dict = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
__UpperCamelCase : Optional[Any] = []
__UpperCamelCase : List[str] = []
__UpperCamelCase : Union[str, Any] = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
__UpperCamelCase : List[Any] = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get('TEST_TYPE', '')} test results""",
'emoji': True,
},
}
]
__UpperCamelCase : Union[str, Any] = 0
for log in Path().glob('*.log'):
__UpperCamelCase : List[str] = 0
with open(log, 'r') as f:
for line in f:
__UpperCamelCase : List[Any] = json.loads(line)
if line.get('nodeid', '') != "":
__UpperCamelCase : Union[str, Any] = line['nodeid']
if line.get('duration', None) is not None:
__UpperCamelCase : int = f"""{line['duration']:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
__UpperCamelCase : List[str] = []
log.unlink()
__UpperCamelCase : List[str] = ''
__UpperCamelCase : Optional[int] = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
__UpperCamelCase : List[str] = []
__UpperCamelCase : List[str] = {}
for test in failed_tests:
__UpperCamelCase : int = test[0].split('::')
__UpperCamelCase : str = data[0].split('/')[-1]
if data[0] not in filesafailed:
__UpperCamelCase : int = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
__UpperCamelCase : Optional[int] = [test[0] for test in failed_table]
__UpperCamelCase : str = list(set(files))
# Count number of instances in failed_tests
__UpperCamelCase : Dict = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
__UpperCamelCase : Dict = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3000:
__UpperCamelCase : List[Any] = 'Too many failed tests, please see the full report in the Action results.'
__UpperCamelCase : Any = len(err) + 10
__UpperCamelCase : List[str] = message[: 3000 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
__UpperCamelCase : Tuple = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
__UpperCamelCase : int = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
__UpperCamelCase : Union[str, Any] = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
__UpperCamelCase : int = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ['GITHUB_REPOSITORY']}/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
payload.append(action_button)
__UpperCamelCase : Optional[int] = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get('TEST_TYPE')} test results for {date.today()}""",
}
],
}
payload.append(date_report)
__UpperCamelCase : Optional[int] = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
__UpperCamelCase : List[str] = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
__UpperCamelCase : List[Any] = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
__UpperCamelCase : Tuple = row[0]
else:
__UpperCamelCase : Any = ''
__UpperCamelCase : Dict = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=['Class', 'Test'], tablefmt=hf_table_format, stralign='right')}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 182 | '''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    """
    Depth estimation pipeline: predicts a depth map for the input image(s).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)

        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
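# Usage sketch (my addition; "Intel/dpt-large" is one public depth checkpoint):
#   from transformers import pipeline
#   depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
#   result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
#   result["depth"].save("depth.png")  # PIL image; result["predicted_depth"] is the raw tensor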
| 67 | 0 |
"""simple docstring"""
import os
SYMBOLS = {"I": 1, "V": 5, "X": 10, "L": 50, "C": 100, "D": 500, "M": 1000}
def parse_roman_numerals(numerals: str) -> int:
    """
    Convert a string of Roman numerals to an integer,
    e.g. parse_roman_numerals("XIV") == 14.
    """
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    """
    Convert an integer to its minimal Roman numeral representation,
    e.g. generate_roman_numerals(14) == "XIV".
    """
    numerals = ""

    m_count = num // 1_000
    numerals += m_count * "M"
    num %= 1_000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """
    For each numeral in the input file, compute the character savings from
    rewriting it in minimal form, and return the total.
    """
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings
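# Worked example (my addition): "IIIIIIIIII" parses to 10 and regenerates as "X",
# saving 9 characters; generate_roman_numerals(1994) == "MCMXCIV".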
if __name__ == "__main__":
print(f"""{solution() = }""")
| 155 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__UpperCAmelCase =["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
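# Note (my addition): at import time Python only builds `_import_structure`;
# `_LazyModule` defers the heavy torch-backed imports above until the first
# attribute access, which keeps importing the package cheap.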
| 67 | 0 |
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
filepaths = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"

upper_files = [file for file in filepaths if file != file.lower()]
if upper_files:
    print(f"{len(upper_files)} files contain uppercase characters:")
    print("\n".join(upper_files) + "\n")

space_files = [file for file in filepaths if " " in file]
if space_files:
    print(f"{len(space_files)} files contain space characters:")
    print("\n".join(space_files) + "\n")

hyphen_files = [file for file in filepaths if "-" in file]
if hyphen_files:
    print(f"{len(hyphen_files)} files contain hyphen characters:")
    print("\n".join(hyphen_files) + "\n")

nodir_files = [file for file in filepaths if os.sep not in file]
if nodir_files:
    print(f"{len(nodir_files)} files are not in a directory:")
    print("\n".join(nodir_files) + "\n")

bad_files = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 114 | '''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = '''ylacombe/bark-small'''
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = '''en_speaker_1'''
__lowerCamelCase = '''This is a test string'''
__lowerCamelCase = '''speaker_embeddings_path.json'''
__lowerCamelCase = '''speaker_embeddings'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **a : Dict ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = BarkProcessor(tokenizer=a )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowerCamelCase = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__lowerCamelCase = 35
__lowerCamelCase = 2
__lowerCamelCase = 8
__lowerCamelCase = {
'''semantic_prompt''': np.ones(a ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__lowerCamelCase = processor(text=self.input_string , voice_preset=a )
__lowerCamelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__lowerCamelCase = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(a , **a )
__lowerCamelCase = processor(text=self.input_string , voice_preset=a )
__lowerCamelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__lowerCamelCase = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = BarkProcessor(tokenizer=a )
__lowerCamelCase = processor(text=self.input_string )
__lowerCamelCase = tokenizer(
self.input_string , padding='''max_length''' , max_length=2_56 , add_special_tokens=a , return_attention_mask=a , return_token_type_ids=a , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 67 | 0 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
@property
def lowerCamelCase_ ( self : Any ):
"""simple docstring"""
torch.manual_seed(0 )
UpperCamelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
def lowerCamelCase_ ( self : Dict ):
"""simple docstring"""
UpperCamelCase = self.dummy_uncond_unet
UpperCamelCase = KarrasVeScheduler()
UpperCamelCase = KarrasVePipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(num_inference_steps=2 , generator=lowerCamelCase_ , output_type="""numpy""" ).images
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(num_inference_steps=2 , generator=lowerCamelCase_ , output_type="""numpy""" , return_dict=lowerCamelCase_ )[0]
UpperCamelCase = image[0, -3:, -3:, -1]
UpperCamelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCamelCase = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
def lowerCamelCase_ ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = """google/ncsnpp-celebahq-256"""
UpperCamelCase = UNetaDModel.from_pretrained(lowerCamelCase_ )
UpperCamelCase = KarrasVeScheduler()
UpperCamelCase = KarrasVePipeline(unet=lowerCamelCase_ , scheduler=lowerCamelCase_ )
pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
UpperCamelCase = torch.manual_seed(0 )
UpperCamelCase = pipe(num_inference_steps=20 , generator=lowerCamelCase_ , output_type="""numpy""" ).images
UpperCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
UpperCamelCase = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 343 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase ={"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit_msn"] = [
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 0 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class __magic_name__ ( UpperCAmelCase__ ):
SCREAMING_SNAKE_CASE = 42
SCREAMING_SNAKE_CASE = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 218 | '''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
__UpperCAmelCase =True
except (ImportError, ModuleNotFoundError):
__UpperCAmelCase =False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def __lowerCAmelCase ( UpperCamelCase__ ) -> str:
    UpperCamelCase__ = re.sub('''<n>''' , '''''' , UpperCamelCase__ )  # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(UpperCamelCase__ ) )
| 67 | 0 |
'''simple docstring'''
import re
from filelock import FileLock
try:
import nltk
_lowerCamelCase : Optional[Any] = True
except (ImportError, ModuleNotFoundError):
_lowerCamelCase : Dict = False
if NLTK_AVAILABLE:
with FileLock(".lock") as lock:
nltk.download("punkt", quiet=True)
def __lowerCamelCase ( A__ ) -> str:
"""simple docstring"""
    A__ = re.sub('<n>' , '' , A__ )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(A__ ) )
| 28 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[int] ="gpt_neox_japanese"
def __init__( self : List[Any] , a : Tuple=3_20_00 , a : Dict=25_60 , a : Union[str, Any]=32 , a : Dict=32 , a : Dict=4 , a : Optional[Any]="gelu" , a : Any=1.00 , a : str=1_00_00 , a : List[str]=20_48 , a : str=0.02 , a : Union[str, Any]=1e-5 , a : Optional[Any]=True , a : str=3_19_96 , a : List[str]=3_19_99 , a : str=0.1 , a : Union[str, Any]=0.0 , **a : Optional[Any] , ):
"""simple docstring"""
super().__init__(bos_token_id=a , eos_token_id=a , **a )
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_multiple_size
__lowerCamelCase = hidden_act
__lowerCamelCase = rotary_pct
__lowerCamelCase = rotary_emb_base
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = use_cache
__lowerCamelCase = attention_dropout
__lowerCamelCase = hidden_dropout
| 67 | 0 |
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class _lowercase ( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : str , snake_case : float , snake_case : Callable , snake_case : int , snake_case : float = 1.0 , snake_case : str = None , ) -> List[str]:
"""simple docstring"""
super().__init__()
UpperCamelCase_ : Any = initial_learning_rate
UpperCamelCase_ : List[Any] = warmup_steps
UpperCamelCase_ : Optional[Any] = power
UpperCamelCase_ : Union[str, Any] = decay_schedule_fn
UpperCamelCase_ : List[Any] = name
def __call__( self : Dict , snake_case : str ) -> Dict:
"""simple docstring"""
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
UpperCamelCase_ : List[Any] = tf.cast(snake_case , tf.floataa )
UpperCamelCase_ : Any = tf.cast(self.warmup_steps , tf.floataa )
UpperCamelCase_ : List[Any] = global_step_float / warmup_steps_float
UpperCamelCase_ : List[Any] = self.initial_learning_rate * tf.math.pow(snake_case , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=snake_case , )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def __lowercase ( lowerCamelCase : int , lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Union[str, Any] = 0.0 , lowerCamelCase : Optional[int] = 0.9 , lowerCamelCase : List[Any] = 0.9_9_9 , lowerCamelCase : Union[str, Any] = 1e-8 , lowerCamelCase : Optional[Any] = None , lowerCamelCase : Optional[int] = None , lowerCamelCase : Optional[Any] = 0.0 , lowerCamelCase : Dict = 1.0 , lowerCamelCase : Union[str, Any] = None , ):
UpperCamelCase_ : int = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=UpperCamelCase__ , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=UpperCamelCase__ , )
if num_warmup_steps:
UpperCamelCase_ : int = WarmUp(
initial_learning_rate=UpperCamelCase__ , decay_schedule_fn=UpperCamelCase__ , warmup_steps=UpperCamelCase__ , )
if weight_decay_rate > 0.0:
UpperCamelCase_ : Tuple = AdamWeightDecay(
learning_rate=UpperCamelCase__ , weight_decay_rate=UpperCamelCase__ , beta_a=UpperCamelCase__ , beta_a=UpperCamelCase__ , epsilon=UpperCamelCase__ , clipnorm=UpperCamelCase__ , global_clipnorm=UpperCamelCase__ , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=UpperCamelCase__ , )
else:
UpperCamelCase_ : Dict = tf.keras.optimizers.Adam(
learning_rate=UpperCamelCase__ , beta_a=UpperCamelCase__ , beta_a=UpperCamelCase__ , epsilon=UpperCamelCase__ , clipnorm=UpperCamelCase__ , global_clipnorm=UpperCamelCase__ , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
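# A standalone sketch of the same warmup-then-decay idea using plain tf.keras
# (assumption: illustrative values; this is not a call into the code above):
#
#   decay = tf.keras.optimizers.schedules.PolynomialDecay(
#       initial_learning_rate=5e-5, decay_steps=9_000, end_learning_rate=0.0, power=1.0
#   )
#   # Steps 0..999 would ramp the LR linearly from 0 up to 5e-5 (the WarmUp branch
#   # of the tf.cond above); afterwards `decay` takes over for the remaining steps.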
class _lowercase ( UpperCAmelCase__ ):
def __init__( self : List[Any] , snake_case : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , snake_case : float = 0.9 , snake_case : float = 0.999 , snake_case : float = 1e-7 , snake_case : bool = False , snake_case : float = 0.0 , snake_case : Optional[List[str]] = None , snake_case : Optional[List[str]] = None , snake_case : str = "AdamWeightDecay" , **snake_case : Union[str, Any] , ) -> str:
"""simple docstring"""
super().__init__(snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , **snake_case )
UpperCamelCase_ : Optional[int] = weight_decay_rate
UpperCamelCase_ : Dict = include_in_weight_decay
UpperCamelCase_ : Tuple = exclude_from_weight_decay
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : Any , snake_case : int ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : List[str] = {'WarmUp': WarmUp}
return super(snake_case , cls ).from_config(snake_case , custom_objects=snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , snake_case : int , snake_case : List[str] , snake_case : Any ) -> Optional[int]:
"""simple docstring"""
super(snake_case , self )._prepare_local(snake_case , snake_case , snake_case )
UpperCamelCase_ : str = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : List[str] , snake_case : List[str] , snake_case : str ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : List[Any] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case : List[Any] , snake_case : int=None , **snake_case : Optional[int] ) -> Any:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Optional[Any] = list(zip(*snake_case ) )
return super(snake_case , self ).apply_gradients(zip(snake_case , snake_case ) , name=snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : Any , snake_case : Tuple , snake_case : int ) -> Any:
"""simple docstring"""
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
UpperCamelCase_ : Optional[int] = apply_state or {}
UpperCamelCase_ : Any = apply_state.get((var_device, var_dtype) )
if coefficients is None:
UpperCamelCase_ : List[Any] = self._fallback_apply_state(snake_case , snake_case )
UpperCamelCase_ : List[str] = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : Optional[int] , snake_case : Tuple , snake_case : Tuple=None ) -> int:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : str = self._get_lr(var.device , var.dtype.base_dtype , snake_case )
UpperCamelCase_ : Any = self._decay_weights_op(snake_case , snake_case , snake_case )
with tf.control_dependencies([decay] ):
return super(snake_case , self )._resource_apply_dense(snake_case , snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : int , snake_case : List[str] , snake_case : str , snake_case : Optional[int]=None ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_, UpperCamelCase_ : Union[str, Any] = self._get_lr(var.device , var.dtype.base_dtype , snake_case )
UpperCamelCase_ : List[Any] = self._decay_weights_op(snake_case , snake_case , snake_case )
with tf.control_dependencies([decay] ):
return super(snake_case , self )._resource_apply_sparse(snake_case , snake_case , snake_case , **snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase_ : List[Any] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def SCREAMING_SNAKE_CASE__ ( self : int , snake_case : List[str] ) -> Tuple:
"""simple docstring"""
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(snake_case , snake_case ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(snake_case , snake_case ) is not None:
return False
return True
class _lowercase ( UpperCAmelCase__ ):
def __init__( self : Tuple ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : Any = []
UpperCamelCase_ : int = None
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
if self._accum_steps is None:
UpperCamelCase_ : Union[str, Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def SCREAMING_SNAKE_CASE__ ( self : int ) -> str:
"""simple docstring"""
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : int , snake_case : Dict ) -> Dict:
"""simple docstring"""
if not self._gradients:
UpperCamelCase_ : Optional[Any] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(snake_case ) , trainable=snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(snake_case ) != len(self._gradients ):
raise ValueError(f"Expected {len(self._gradients )} gradients, but got {len(snake_case )}" )
for accum_gradient, gradient in zip(self._gradients , snake_case ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(snake_case )
self._accum_steps.assign_add(1 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(snake_case ) )
| 175 | '''simple docstring'''
def add(first: int, second: int) -> int:
    while second != 0:
        # AND isolates the carry bits, XOR adds without carrying,
        # and the shift moves each carry into the next position.
        c = first & second
        first ^= second
        second = c << 1
    return first
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase =int(input("Enter the first number: ").strip())
__UpperCAmelCase =int(input("Enter the second number: ").strip())
print(f'{add(first, second) = }')
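# Worked example: add(6, 5)
#   pass 1: c = 6 & 5 = 4, first = 6 ^ 5 = 3, second = 4 << 1 = 8
#   pass 2: c = 3 & 8 = 0, first = 3 ^ 8 = 11, second = 0  ->  returns 11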
| 67 | 0 |
import qiskit
def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    """simple docstring"""
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gate
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(classical_bits)))
    # Now measuring any one qubit would affect other qubits to collapse
    # their super position and have same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
if __name__ == "__main__":
print(f'Total count for various states are: {quantum_entanglement(3)}')
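# The H gate plus the CNOT chain prepares a GHZ state, so the counts concentrate
# on the all-zeros and all-ones bitstrings -- e.g. for 3 qubits and 1000 shots,
# roughly {'000': ~500, '111': ~500} (exact numbers vary between runs).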
| 10 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_time_series_transformer"] = [
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 0 |
import numpy as np
def _a(f, ya, xa, h, x_end):
    """simple docstring"""
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        ka = f(x, y[k])                            # slope at the start of the step
        kb = f(x + 0.5 * h, y[k] + 0.5 * h * ka)   # midpoint slope using ka
        kc = f(x + 0.5 * h, y[k] + 0.5 * h * kb)   # midpoint slope using kb
        kd = f(x + h, y[k] + h * kc)               # slope at the end of the step
        y[k + 1] = y[k] + (1 / 6) * h * (ka + 2 * kb + 2 * kc + kd)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod()
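# Usage sketch (illustrative): integrate y' = y with y(0) = 1 over [0, 1]:
#
#   y = _a(lambda x, y: y, 1.0, 0.0, 0.25, 1.0)
#   round(float(y[-1]), 3)   # -> 2.718, the classic fourth-order approximation of e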
| 322 | '''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None:
__lowerCamelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ), f"""{len(UpperCamelCase__ )} != {len(UpperCamelCase__ )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
__UpperCAmelCase ={
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
__UpperCAmelCase ={
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
try:
__lowerCamelCase = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
f""" {n_student}""" )
return list(range(UpperCamelCase__ ) )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[int]:
if n_student > n_teacher:
raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(UpperCamelCase__ ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
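# Example from the tables above: distilling a 12-layer teacher into a 3-layer
# student copies teacher layers [0, 6, 11] and supervises against teacher
# layers [3, 7, 11].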
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = "student" , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
__lowerCamelCase = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
AutoTokenizer.from_pretrained(UpperCamelCase__ ).save_pretrained(UpperCamelCase__ ) # purely for convenience
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ ).eval()
else:
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), f"""teacher must be a model or string got type {type(UpperCamelCase__ )}"""
__lowerCamelCase = teacher.config.to_diff_dict()
try:
__lowerCamelCase , __lowerCamelCase = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__lowerCamelCase = teacher_e
if d is None:
__lowerCamelCase = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
__lowerCamelCase , __lowerCamelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__lowerCamelCase , __lowerCamelCase = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__lowerCamelCase = teacher_e
if d is None:
__lowerCamelCase = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(UpperCamelCase__ )
# Copy weights
__lowerCamelCase = teacher.config_class(**UpperCamelCase__ )
__lowerCamelCase = AutoModelForSeqaSeqLM.from_config(UpperCamelCase__ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__lowerCamelCase = student.load_state_dict(teacher.state_dict() , strict=UpperCamelCase__ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__lowerCamelCase , __lowerCamelCase = list(range(UpperCamelCase__ ) ), list(range(UpperCamelCase__ ) )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
f""" {save_path}""" )
student.save_pretrained(UpperCamelCase__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__lowerCamelCase = pick_layers_to_copy(UpperCamelCase__ , UpperCamelCase__ )
if d_layers_to_copy is None:
__lowerCamelCase = pick_layers_to_copy(UpperCamelCase__ , UpperCamelCase__ )
try:
if hasattr(
UpperCamelCase__ , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , UpperCamelCase__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , UpperCamelCase__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , UpperCamelCase__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , UpperCamelCase__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , UpperCamelCase__ )
copy_layers(teacher.decoder.block , student.decoder.block , UpperCamelCase__ )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
__lowerCamelCase = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(UpperCamelCase__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 67 | 0 |
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
UpperCamelCase__ = logging.getLogger(__name__)
UpperCamelCase__ = '''pytorch_model.bin'''
@dataclasses.dataclass
class lowerCamelCase_ :
lowerCAmelCase__ = dataclasses.field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models.'} )
lowerCAmelCase__ = dataclasses.field(
default=UpperCAmelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co.'} , )
@dataclasses.dataclass
class lowerCamelCase_ :
lowerCAmelCase__ = dataclasses.field(metadata={'help': 'A csv or a json file containing the training data.'} )
lowerCAmelCase__ = dataclasses.field(metadata={'help': 'A csv or a json file containing the data to predict on.'} )
lowerCAmelCase__ = dataclasses.field(
default=UpperCAmelCase__ , metadata={'help': 'A csv or a json file containing the validation data.'} )
lowerCAmelCase__ = dataclasses.field(
default=UpperCAmelCase__ , metadata={'help': 'The name of the task to train on.'} , )
lowerCAmelCase__ = dataclasses.field(
default=UpperCAmelCase__ , metadata={'help': 'The list of labels for the task.'} )
@dataclasses.dataclass
class lowerCamelCase_ :
lowerCAmelCase__ = dataclasses.field(
metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'} )
lowerCAmelCase__ = dataclasses.field(
default='accuracy' , metadata={'help': 'The evaluation metric used for the task.'} )
lowerCAmelCase__ = dataclasses.field(
default='no' , metadata={
        'help': 'The evaluation strategy to adopt during training. Possible values are: ["no", "step", "epoch"]'
} , )
lowerCAmelCase__ = dataclasses.field(
default=1_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
lowerCAmelCase__ = dataclasses.field(
default=0.0 , metadata={
'help': 'How much the specified evaluation metric must improve to satisfy early stopping conditions.'
} , )
lowerCAmelCase__ = dataclasses.field(
default=UpperCAmelCase__ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the confidence score.'} , )
lowerCAmelCase__ = dataclasses.field(
default=UpperCAmelCase__ , metadata={'help': 'Whether to filter the pseudo-labeled data based on the validation performance.'} , )
lowerCAmelCase__ = dataclasses.field(
default=UpperCAmelCase__ , metadata={'help': 'Whether to fine-tune on labeled data after pseudo training.'} , )
lowerCAmelCase__ = dataclasses.field(
default=0.0 , metadata={'help': 'Confidence threshold for pseudo-labeled data filtering.'} , )
lowerCAmelCase__ = dataclasses.field(
default=1_0_0 , metadata={'help': 'Number of evaluation calls with no improvement after which training will be stopped.'} , )
lowerCAmelCase__ = dataclasses.field(
default=UpperCAmelCase__ , metadata={'help': 'Random seed for initialization.'} , )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Any:
UpperCAmelCase__ : str = datasets.concatenate_datasets([infer_input, infer_output] , axis=1 )
if args.do_filter_by_confidence:
UpperCAmelCase__ : Tuple = dataset.filter(lambda lowerCAmelCase__ : example["probability"] > args.confidence_threshold )
if args.do_filter_by_val_performance:
assert eval_result >= 0.0 and eval_result <= 1.0
UpperCAmelCase__ : Tuple = int(eval_result * len(UpperCamelCase__ ) )
print(UpperCamelCase__ )
UpperCAmelCase__ : str = dataset.sort('''probability''' , reverse=UpperCamelCase__ )
UpperCAmelCase__ : Tuple = dataset.select(range(UpperCamelCase__ ) )
UpperCAmelCase__ : List[str] = dataset.remove_columns(['''label''', '''probability'''] )
UpperCAmelCase__ : str = dataset.rename_column('''prediction''' , '''label''' )
UpperCAmelCase__ : Dict = dataset.map(lambda lowerCAmelCase__ : {"label": idalabel[example["label"]]} )
UpperCAmelCase__ : Tuple = dataset.shuffle(seed=args.seed )
UpperCAmelCase__ : str = os.path.join(UpperCamelCase__ , F"""train_pseudo.{args.data_file_extension}""" )
if args.data_file_extension == "csv":
dataset.to_csv(UpperCamelCase__ , index=UpperCamelCase__ )
else:
dataset.to_json(UpperCamelCase__ )
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[int]:
UpperCAmelCase__ : str = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(accelerator.state )
# Setup logging, we only want one process per machine to log things on the
# screen. accelerator.is_local_main_process is only True for one process per
# machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
UpperCAmelCase__ : str = STModelArguments(model_name_or_path=UpperCamelCase__ )
UpperCAmelCase__ : int = STDataArguments(train_file=UpperCamelCase__ , infer_file=UpperCamelCase__ )
UpperCAmelCase__ : str = STTrainingArguments(output_dir=UpperCamelCase__ )
UpperCAmelCase__ : List[str] = argparse.Namespace()
for arg_class in (model_args, data_args, training_args):
for key, value in vars(UpperCamelCase__ ).items():
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
for key, value in kwargs.items():
if hasattr(UpperCamelCase__ , UpperCamelCase__ ):
setattr(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Sanity checks
UpperCAmelCase__ : Union[str, Any] = {}
UpperCAmelCase__ : Tuple = None
# You need to provide the training data and the data to predict on
assert args.train_file is not None
assert args.infer_file is not None
UpperCAmelCase__ : str = args.train_file
UpperCAmelCase__ : Optional[Any] = args.infer_file
if args.evaluation_strategy != IntervalStrategy.NO.value:
assert args.eval_file is not None
UpperCAmelCase__ : Optional[int] = args.eval_file
for key in data_files:
UpperCAmelCase__ : List[Any] = data_files[key].split('''.''' )[-1]
assert extension in ["csv", "json"], F"""`{key}_file` should be a csv or a json file."""
if args.data_file_extension is None:
UpperCAmelCase__ : Optional[int] = extension
else:
assert extension == args.data_file_extension, F"""`{key}_file` should be a {args.data_file_extension} file`."""
assert (
args.eval_metric in datasets.list_metrics()
), F"""{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."""
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed )
logger.info('''Creating the initial data directory for self-training...''' )
UpperCAmelCase__ : Tuple = F"""{args.output_dir}/self-train_iter-{{}}""".format
UpperCAmelCase__ : Optional[int] = data_dir_format(0 )
if accelerator.is_main_process:
if args.output_dir is not None:
os.makedirs(args.output_dir , exist_ok=UpperCamelCase__ )
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
accelerator.wait_for_everyone()
UpperCAmelCase__ : Any = None
UpperCAmelCase__ : Union[str, Any] = None
UpperCAmelCase__ : List[Any] = 0
UpperCAmelCase__ : Optional[Any] = False
# Show the progress bar
UpperCAmelCase__ : Union[str, Any] = tqdm(range(args.max_selftrain_iterations ) , disable=not accelerator.is_local_main_process )
# Self-train
for iteration in range(0 , int(args.max_selftrain_iterations ) ):
UpperCAmelCase__ : Any = data_dir_format(UpperCamelCase__ )
assert os.path.exists(UpperCamelCase__ )
# Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
# iteration > 0
UpperCAmelCase__ : List[str] = os.path.join(UpperCamelCase__ , '''stage-1''' )
UpperCAmelCase__ : str = {
'''accelerator''': accelerator,
'''model_name_or_path''': args.model_name_or_path,
'''cache_dir''': args.cache_dir,
'''do_train''': True,
'''train_file''': data_files['''train'''] if iteration == 0 else data_files['''train_pseudo'''],
'''do_eval''': True if args.eval_file is not None else False,
'''eval_file''': data_files['''eval'''],
'''do_predict''': True,
'''infer_file''': data_files['''infer'''],
'''task_name''': args.task_name,
'''label_list''': args.label_list,
'''output_dir''': current_output_dir,
'''eval_metric''': args.eval_metric,
'''evaluation_strategy''': args.evaluation_strategy,
'''early_stopping_patience''': args.early_stopping_patience,
'''early_stopping_threshold''': args.early_stopping_threshold,
'''seed''': args.seed,
}
# Add additional training arguments
for key, value in kwargs.items():
if key not in arguments_dict and not hasattr(UpperCamelCase__ , UpperCamelCase__ ):
arguments_dict.update({key: value} )
UpperCAmelCase__ : Union[str, Any] = os.path.join(UpperCamelCase__ , '''best-checkpoint''' , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.''' , UpperCamelCase__ , UpperCamelCase__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 1 *****''' , UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info('''Self-training job completed: iteration: %d, stage: 1.''' , UpperCamelCase__ )
if iteration > 0 and args.finetune_on_labeled_data:
# Stage 2 (optional): fine-tuning on the original labeled data
UpperCAmelCase__ : Optional[int] = os.path.join(UpperCamelCase__ , '''best-checkpoint''' )
UpperCAmelCase__ : int = os.path.join(UpperCamelCase__ , '''stage-2''' )
# Update arguments_dict
UpperCAmelCase__ : List[str] = model_path
UpperCAmelCase__ : Union[str, Any] = data_files['''train''']
UpperCAmelCase__ : List[Any] = current_output_dir
UpperCAmelCase__ : Any = os.path.join(UpperCamelCase__ , '''best-checkpoint''' , UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ):
logger.info(
'''Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.''' , UpperCamelCase__ , UpperCamelCase__ , )
else:
logger.info('''***** Running self-training: iteration: %d, stage: 2 *****''' , UpperCamelCase__ )
finetune(**UpperCamelCase__ )
accelerator.wait_for_everyone()
assert os.path.exists(UpperCamelCase__ )
logger.info('''Self-training job completed: iteration: %d, stage: 2.''' , UpperCamelCase__ )
UpperCAmelCase__ : Any = iteration
UpperCAmelCase__ : str = data_dir_format(iteration + 1 )
UpperCAmelCase__ : Any = AutoConfig.from_pretrained(os.path.join(UpperCamelCase__ , '''best-checkpoint''' ) )
UpperCAmelCase__ : Optional[Any] = config.idalabel
UpperCAmelCase__ : List[Any] = os.path.join(UpperCamelCase__ , '''eval_results_best-checkpoint.json''' )
UpperCAmelCase__ : List[str] = os.path.join(UpperCamelCase__ , '''test_results_best-checkpoint.json''' )
assert os.path.exists(UpperCamelCase__ )
with open(UpperCamelCase__ , '''r''' ) as f:
UpperCAmelCase__ : Any = float(json.load(UpperCamelCase__ )[args.eval_metric] )
UpperCAmelCase__ : List[Any] = os.path.join(UpperCamelCase__ , '''infer_output_best-checkpoint.csv''' )
assert os.path.exists(UpperCamelCase__ )
# Loading the dataset from local csv or json files.
UpperCAmelCase__ : List[Any] = load_dataset(args.data_file_extension , data_files={'''data''': data_files['''infer''']} )['''data''']
UpperCAmelCase__ : Optional[Any] = load_dataset('''csv''' , data_files={'''data''': infer_output_file} )['''data''']
if accelerator.is_main_process:
os.makedirs(UpperCamelCase__ , exist_ok=UpperCamelCase__ )
shutil.copy(UpperCamelCase__ , os.path.join(UpperCamelCase__ , F"""eval_results_iter-{iteration}.json""" ) )
if os.path.exists(UpperCamelCase__ ):
shutil.copy(UpperCamelCase__ , os.path.join(UpperCamelCase__ , F"""test_results_iter-{iteration}.json""" ) )
create_pseudo_labeled_data(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
accelerator.wait_for_everyone()
UpperCAmelCase__ : Dict = os.path.join(UpperCamelCase__ , F"""train_pseudo.{args.data_file_extension}""" )
if args.evaluation_strategy != IntervalStrategy.NO.value:
UpperCAmelCase__ : Optional[Any] = eval_result
if best_iteration is None:
UpperCAmelCase__ : Optional[int] = new_iteration
UpperCAmelCase__ : Any = new_eval_result
else:
if new_eval_result - best_eval_result > args.early_stopping_threshold:
UpperCAmelCase__ : List[str] = new_iteration
UpperCAmelCase__ : Optional[Any] = new_eval_result
UpperCAmelCase__ : Any = 0
else:
if new_eval_result == best_eval_result:
UpperCAmelCase__ : List[str] = new_iteration
UpperCAmelCase__ : Optional[Any] = new_eval_result
early_stopping_patience_counter += 1
if early_stopping_patience_counter >= args.early_stopping_patience:
UpperCAmelCase__ : int = True
progress_bar.update(1 )
if should_training_stop:
break
if best_iteration is not None:
# Save the best iteration
logger.info('''Best iteration: %d''' , UpperCamelCase__ )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__ , F"""eval_results_iter-{iteration}.json""" ) , os.path.join(UpperCamelCase__ , '''eval_results_best-iteration.json''' ) , )
else:
# Assume that the last iteration is the best
logger.info('''Best iteration: %d''' , args.max_selftrain_iterations - 1 )
logger.info('''Best evaluation result: %s = %f''' , args.eval_metric , UpperCamelCase__ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
shutil.copy(
os.path.join(UpperCamelCase__ , F"""eval_results_iter-{args.max_selftrain_iterations - 1}.json""" ) , os.path.join(UpperCamelCase__ , '''eval_results_best-iteration.json''' ) , )
| 181 | '''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__UpperCAmelCase =["gpt2"]
__UpperCAmelCase ="gpt2"
if is_tf_available():
class a__ ( tf.Module ):
def __init__( self : str , a : Union[str, Any] ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = tokenizer
__lowerCamelCase = AutoConfig.from_pretrained(a )
__lowerCamelCase = TFGPTaLMHeadModel.from_config(a )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) )
def SCREAMING_SNAKE_CASE__ ( self : str , a : Tuple ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer(a )
__lowerCamelCase = tokenized['''input_ids'''].to_tensor()
__lowerCamelCase = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__lowerCamelCase = self.model(input_ids=a , attention_mask=a )['''logits''']
return outputs
@require_tf
@require_keras_nlp
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
__lowerCamelCase = [GPTaTokenizer.from_pretrained(a ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
__lowerCamelCase = [TFGPTaTokenizer.from_pretrained(a ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__lowerCamelCase = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
__lowerCamelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__lowerCamelCase = tokenizer([test_inputs] , return_tensors='''tf''' )
__lowerCamelCase = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__lowerCamelCase = python_outputs[key].numpy()
__lowerCamelCase = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(a , tf.intaa ) == tf_outputs_values ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase = tf.function(a )
for test_inputs in self.test_sentences:
__lowerCamelCase = tf.constant(a )
__lowerCamelCase = compiled_tokenizer(a )
__lowerCamelCase = tf_tokenizer(a )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase = ModelToSave(tokenizer=a )
__lowerCamelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCamelCase = model.serving(a ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__lowerCamelCase = Path(a ) / '''saved.model'''
tf.saved_model.save(a , a , signatures={'''serving_default''': model.serving} )
__lowerCamelCase = tf.saved_model.load(a )
__lowerCamelCase = loaded_model.signatures['''serving_default'''](a )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCamelCase = tf_tokenizer(a ) # Build model with some sample inputs
__lowerCamelCase = tf_tokenizer.get_config()
__lowerCamelCase = TFGPTaTokenizer.from_config(a )
__lowerCamelCase = model_from_config(a )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__lowerCamelCase = 12_31_23
for max_length in [3, 5, 10_24]:
__lowerCamelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCamelCase = tf_tokenizer(a , max_length=a )
__lowerCamelCase = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
| 67 | 0 |
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
a_ : Dict = logging.get_logger(__name__)
class __UpperCamelCase ( UpperCAmelCase__ ):
def __init__( self, lowerCAmelCase ):
"""simple docstring"""
super().__init__()
lowerCamelCase_ =nn.ModuleList(lowerCAmelCase )
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = True, ):
"""simple docstring"""
for i, (image, scale, controlnet) in enumerate(zip(lowerCAmelCase, lowerCAmelCase, self.nets ) ):
lowerCamelCase_, lowerCamelCase_ =controlnet(
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, lowerCAmelCase, )
# merge samples
if i == 0:
lowerCamelCase_, lowerCamelCase_ =down_samples, mid_sample
else:
lowerCamelCase_ =[
samples_prev + samples_curr
for samples_prev, samples_curr in zip(lowerCAmelCase, lowerCAmelCase )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def lowercase__ ( self, lowerCAmelCase, lowerCAmelCase = True, lowerCAmelCase = None, lowerCAmelCase = False, lowerCAmelCase = None, ):
"""simple docstring"""
lowerCamelCase_ =0
lowerCamelCase_ =save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
lowerCAmelCase, is_main_process=lowerCAmelCase, save_function=lowerCAmelCase, safe_serialization=lowerCAmelCase, variant=lowerCAmelCase, )
idx += 1
lowerCamelCase_ =model_path_to_save + f'''_{idx}'''
@classmethod
def lowercase__ ( cls, lowerCAmelCase, **lowerCAmelCase ):
"""simple docstring"""
lowerCamelCase_ =0
lowerCamelCase_ =[]
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
lowerCamelCase_ =pretrained_model_path
while os.path.isdir(lowerCAmelCase ):
lowerCamelCase_ =ControlNetModel.from_pretrained(lowerCAmelCase, **lowerCAmelCase )
controlnets.append(lowerCAmelCase )
idx += 1
lowerCamelCase_ =pretrained_model_path + f'''_{idx}'''
logger.info(f'''{len(lowerCAmelCase )} controlnets loaded from {pretrained_model_path}.''' )
if len(lowerCAmelCase ) == 0:
raise ValueError(
f'''No ControlNets found under {os.path.dirname(lowerCAmelCase )}. Expected at least {pretrained_model_path + '_0'}.''' )
return cls(lowerCAmelCase )
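# On disk, save_pretrained above writes the first controlnet to `save_directory`
# and each subsequent one to `save_directory_1`, `save_directory_2`, ...;
# from_pretrained walks the same `_{idx}` suffix sequence back up.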
| 75 | '''simple docstring'''
def actual_power(a: int, b: int) -> int:
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
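# Worked example: power(-2, -3) = 1 / actual_power(-2, -3) = 1 / (-8) = -0.125
# (int(b / 2) truncates toward zero, so the recursion also terminates for b < 0).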
| 67 | 0 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {'configuration_van': ['VAN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'VanConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_van'] = [
'VAN_PRETRAINED_MODEL_ARCHIVE_LIST',
'VanForImageClassification',
'VanModel',
'VanPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_van import VAN_PRETRAINED_CONFIG_ARCHIVE_MAP, VanConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_van import (
VAN_PRETRAINED_MODEL_ARCHIVE_LIST,
VanForImageClassification,
VanModel,
VanPreTrainedModel,
)
else:
import sys
__UpperCamelCase : int = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 182 | '''simple docstring'''
import logging
import os
from .state import PartialState
class a__ ( logging.LoggerAdapter ):
@staticmethod
def SCREAMING_SNAKE_CASE__ ( a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def SCREAMING_SNAKE_CASE__ ( self : int , a : Optional[int] , a : str , *a : Optional[int] , **a : List[Any] ):
"""simple docstring"""
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
__lowerCamelCase = kwargs.pop('''main_process_only''' , a )
__lowerCamelCase = kwargs.pop('''in_order''' , a )
if self.isEnabledFor(a ):
if self._should_log(a ):
__lowerCamelCase , __lowerCamelCase = self.process(a , a )
self.logger.log(a , a , *a , **a )
elif in_order:
__lowerCamelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__lowerCamelCase , __lowerCamelCase = self.process(a , a )
self.logger.log(a , a , *a , **a )
state.wait_for_everyone()
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = None ) -> Optional[int]:
if log_level is None:
__lowerCamelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , UpperCamelCase__ )
__lowerCamelCase = logging.getLogger(UpperCamelCase__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(UpperCamelCase__ , {} )
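# Usage sketch (assumption: upstream this factory is accelerate's `get_logger`;
# the adapter accepts the extra keywords popped above):
#
#   logger = get_logger(__name__, log_level="INFO")
#   logger.info("printed once", main_process_only=True)
#   logger.debug("printed per process, in rank order", main_process_only=False, in_order=True)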
| 67 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'transfo-xl-wt103': 'https://huggingface.co/transfo-xl-wt103/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
_a = "transfo-xl"
_a = ["mems"]
_a = {
"n_token": "vocab_size",
"hidden_size": "d_model",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Union[str, Any] , lowerCAmelCase : List[Any]=26_7735 , lowerCAmelCase : Tuple=[2_0000, 4_0000, 20_0000] , lowerCAmelCase : Tuple=1024 , lowerCAmelCase : Tuple=1024 , lowerCAmelCase : List[Any]=16 , lowerCAmelCase : str=64 , lowerCAmelCase : Optional[Any]=4096 , lowerCAmelCase : List[Any]=4 , lowerCAmelCase : Tuple=False , lowerCAmelCase : List[str]=18 , lowerCAmelCase : int=1600 , lowerCAmelCase : List[str]=1000 , lowerCAmelCase : Union[str, Any]=True , lowerCAmelCase : Tuple=True , lowerCAmelCase : Tuple=0 , lowerCAmelCase : Any=-1 , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[int]=0.1 , lowerCAmelCase : Union[str, Any]=0.0 , lowerCAmelCase : List[Any]=True , lowerCAmelCase : Union[str, Any]="normal" , lowerCAmelCase : Dict=0.01 , lowerCAmelCase : Dict=0.01 , lowerCAmelCase : int=0.02 , lowerCAmelCase : int=1e-5 , lowerCAmelCase : str=0 , **lowerCAmelCase : Optional[int] , ):
lowerCAmelCase = vocab_size
lowerCAmelCase = []
self.cutoffs.extend(lowerCAmelCase )
if proj_share_all_but_first:
lowerCAmelCase = [False] + [True] * len(self.cutoffs )
else:
lowerCAmelCase = [False] + [False] * len(self.cutoffs )
lowerCAmelCase = d_model
lowerCAmelCase = d_embed
lowerCAmelCase = d_head
lowerCAmelCase = d_inner
lowerCAmelCase = div_val
lowerCAmelCase = pre_lnorm
lowerCAmelCase = n_layer
lowerCAmelCase = n_head
lowerCAmelCase = mem_len
lowerCAmelCase = same_length
lowerCAmelCase = attn_type
lowerCAmelCase = clamp_len
lowerCAmelCase = sample_softmax
lowerCAmelCase = adaptive
lowerCAmelCase = dropout
lowerCAmelCase = dropatt
lowerCAmelCase = untie_r
lowerCAmelCase = init
lowerCAmelCase = init_range
lowerCAmelCase = proj_init_std
lowerCAmelCase = init_std
lowerCAmelCase = layer_norm_epsilon
super().__init__(eos_token_id=lowerCAmelCase , **lowerCAmelCase )
@property
    def max_position_embeddings( self : str ):
logger.info(f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
return -1
@max_position_embeddings.setter
    def max_position_embeddings( self : str , lowerCAmelCase : List[str] ):
raise NotImplementedError(
f'''The model {self.model_type} is one of the few models that has no sequence length limit.''' )
| 155 | '''simple docstring'''
def is_balanced(s: str) -> bool:
    stack = []
    open_brackets = set({'''(''', '''[''', '''{'''})
    closed_brackets = set({''')''', ''']''', '''}'''})
    open_to_closed = {'''{''': '''}''', '''[''': ''']''', '''(''': ''')'''}
    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False
    return len(stack) == 0


def main() -> None:
    s = input('''Enter sequence of brackets: ''')
    if is_balanced(s):
        print(s, '''is balanced''')
    else:
        print(s, '''is not balanced''')
if __name__ == "__main__":
main()
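# Examples: is_balanced("{[()]}") -> True
#           is_balanced("{[(])}") -> False  (']' closes '(' out of order)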
| 67 | 0 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
a : Tuple = "CompVis/stable-diffusion-v1-1"
a : int = "CompVis/stable-diffusion-v1-2"
a : str = "CompVis/stable-diffusion-v1-3"
a : List[Any] = "CompVis/stable-diffusion-v1-4"
class a ( UpperCAmelCase__ ):
"""simple docstring"""
def __init__( self : Optional[Any] , __lowercase : AutoencoderKL , __lowercase : CLIPTextModel , __lowercase : CLIPTokenizer , __lowercase : UNetaDConditionModel , __lowercase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __lowercase : StableDiffusionSafetyChecker , __lowercase : CLIPImageProcessor , __lowercase : bool = True , ) -> Dict:
        super().__init__()
__UpperCAmelCase : Any = StableDiffusionPipeline.from_pretrained(__lowercase )
__UpperCAmelCase : Dict = StableDiffusionPipeline.from_pretrained(__lowercase )
__UpperCAmelCase : Dict = StableDiffusionPipeline.from_pretrained(__lowercase )
__UpperCAmelCase : List[Any] = StableDiffusionPipeline(
vae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , unet=__lowercase , scheduler=__lowercase , safety_checker=__lowercase , feature_extractor=__lowercase , requires_safety_checker=__lowercase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def UpperCAmelCase ( self : Dict ) -> str:
return {k: getattr(self , __lowercase ) for k in self.config.keys() if not k.startswith("""_""" )}
def UpperCAmelCase ( self : List[Any] , __lowercase : Optional[Union[str, int]] = "auto" ) -> Any:
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__UpperCAmelCase : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(__lowercase )
def UpperCAmelCase ( self : List[Any] ) -> Union[str, Any]:
self.enable_attention_slicing(__lowercase )
@torch.no_grad()
def UpperCAmelCase ( self : Any , __lowercase : Union[str, List[str]] , __lowercase : int = 512 , __lowercase : int = 512 , __lowercase : int = 50 , __lowercase : float = 7.5 , __lowercase : Optional[Union[str, List[str]]] = None , __lowercase : Optional[int] = 1 , __lowercase : float = 0.0 , __lowercase : Optional[torch.Generator] = None , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[str] = "pil" , __lowercase : bool = True , __lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowercase : int = 1 , **__lowercase : Union[str, Any] , ) -> Any:
return self.pipea(
prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
@torch.no_grad()
def UpperCAmelCase ( self : List[str] , __lowercase : Union[str, List[str]] , __lowercase : int = 512 , __lowercase : int = 512 , __lowercase : int = 50 , __lowercase : float = 7.5 , __lowercase : Optional[Union[str, List[str]]] = None , __lowercase : Optional[int] = 1 , __lowercase : float = 0.0 , __lowercase : Optional[torch.Generator] = None , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[str] = "pil" , __lowercase : bool = True , __lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowercase : int = 1 , **__lowercase : Tuple , ) -> List[str]:
return self.pipea(
prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
@torch.no_grad()
def UpperCAmelCase ( self : List[str] , __lowercase : Union[str, List[str]] , __lowercase : int = 512 , __lowercase : int = 512 , __lowercase : int = 50 , __lowercase : float = 7.5 , __lowercase : Optional[Union[str, List[str]]] = None , __lowercase : Optional[int] = 1 , __lowercase : float = 0.0 , __lowercase : Optional[torch.Generator] = None , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[str] = "pil" , __lowercase : bool = True , __lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowercase : int = 1 , **__lowercase : str , ) -> int:
return self.pipea(
prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
@torch.no_grad()
def UpperCAmelCase ( self : Optional[Any] , __lowercase : Union[str, List[str]] , __lowercase : int = 512 , __lowercase : int = 512 , __lowercase : int = 50 , __lowercase : float = 7.5 , __lowercase : Optional[Union[str, List[str]]] = None , __lowercase : Optional[int] = 1 , __lowercase : float = 0.0 , __lowercase : Optional[torch.Generator] = None , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[str] = "pil" , __lowercase : bool = True , __lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowercase : int = 1 , **__lowercase : Tuple , ) -> Dict:
return self.pipea(
prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
@torch.no_grad()
def UpperCAmelCase ( self : Optional[int] , __lowercase : Union[str, List[str]] , __lowercase : int = 512 , __lowercase : int = 512 , __lowercase : int = 50 , __lowercase : float = 7.5 , __lowercase : Optional[Union[str, List[str]]] = None , __lowercase : Optional[int] = 1 , __lowercase : float = 0.0 , __lowercase : Optional[torch.Generator] = None , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[str] = "pil" , __lowercase : bool = True , __lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowercase : int = 1 , **__lowercase : str , ) -> str:
__UpperCAmelCase : Tuple = """cuda""" if torch.cuda.is_available() else """cpu"""
self.to(__lowercase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
__UpperCAmelCase : Optional[Any] = self.textaimg_sda_a(
prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
# Get first result from Stable Diffusion Checkpoint v1.2
__UpperCAmelCase : Any = self.textaimg_sda_a(
prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
# Get first result from Stable Diffusion Checkpoint v1.3
__UpperCAmelCase : Tuple = self.textaimg_sda_a(
prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
# Get first result from Stable Diffusion Checkpoint v1.4
__UpperCAmelCase : Any = self.textaimg_sda_a(
prompt=__lowercase , height=__lowercase , width=__lowercase , num_inference_steps=__lowercase , guidance_scale=__lowercase , negative_prompt=__lowercase , num_images_per_prompt=__lowercase , eta=__lowercase , generator=__lowercase , latents=__lowercase , output_type=__lowercase , return_dict=__lowercase , callback=__lowercase , callback_steps=__lowercase , **__lowercase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
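# Minimal usage sketch (heavy: downloads four checkpoints; the custom_pipeline
# id is an assumption based on diffusers community pipeline naming):
# pipe = DiffusionPipeline.from_pretrained(
#     "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
# )
# pipe.enable_attention_slicing()
# output = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)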
| 114 | '''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        # Mean-pool the token embeddings over the attention mask, then project
        # into the CLIP image embedding space.
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
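# Usage sketch (the tokenizer id is an assumption; M-CLIP text encoders are
# XLM-RoBERTa based, and a freshly constructed model has random weights):
# from transformers import AutoTokenizer
# tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-large")
# batch = tokenizer(["une photo d'un chat"], return_tensors="pt", padding=True)
# model = MultilingualCLIP(MCLIPConfig())
# projected, token_embeddings = model(batch["input_ids"], batch["attention_mask"])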
| 67 | 0 |
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
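# Illustrative scheduler swap using the classes exported above (the model id is
# a placeholder; `from_config` on a scheduler class is standard diffusers usage):
# from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
# pipe = DiffusionPipeline.from_pretrained("some/model-id")
# pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)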
| 343 | '''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
    blip_2,
bloom,
bridgetower,
    byt5,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
    convnextv2,
cpm,
cpmant,
ctrl,
cvt,
    data2vec,
deberta,
    deberta_v2,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
    gpt2,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
    gpt_sw3,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
    layoutlmv2,
    layoutlmv3,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
    longt5,
luke,
lxmert,
    m2m_100,
marian,
markuplm,
    mask2former,
maskformer,
mbart,
    mbart50,
mega,
megatron_bert,
    megatron_gpt2,
mgp_str,
mluke,
mobilebert,
    mobilenet_v1,
    mobilenet_v2,
mobilevit,
    mobilevitv2,
mpnet,
mra,
    mt5,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
    pix2struct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
    speech_to_text_2,
    speecht5,
splinter,
squeezebert,
swiftformer,
swin,
    swin2sr,
    swinv2,
switch_transformers,
    t5,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
    umt5,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
    wav2vec2,
    wav2vec2_conformer,
    wav2vec2_phoneme,
    wav2vec2_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 67 | 0 |
import argparse
import torch
from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")
MAPPING_SPEECH_ENCODER_PRENET = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk `key` dot-path into the HF model and copy `value` into the matching tensor."""
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False
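# How the patterns above match (hypothetical parameter names):
#   should_ignore("encoder.version", IGNORE_KEYS)              -> True  (plain substring)
#   should_ignore("encoder.layers.3.norm_k.bias", IGNORE_KEYS) -> True  (".*." wildcard)
#   should_ignore("decoder.layers.0.fc1.weight", IGNORE_KEYS)  -> False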
def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_encoder,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_speecht5_checkpoint(
    task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None
):
    """Convert a fairseq SpeechT5 checkpoint into the Transformers format."""
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
    args = parser.parse_args()
    convert_speecht5_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
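    # Example invocation (script name and paths are placeholders):
    #   python convert_speecht5_checkpoint.py --task s2t --checkpoint_path ./speecht5_asr.pt \
    #       --vocab_path ./spm_char.model --pytorch_dump_folder_path ./speecht5_asr_converted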
| 218 | '''simple docstring'''
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    """Return the inverse of a 2x2 or 3x3 matrix, computed with Decimal precision."""
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
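# Quick worked example (illustrative): for [[4, 7], [2, 6]] the determinant is
# 4 * 6 - 2 * 7 = 10, so the inverse is [[0.6, -0.7], [-0.2, 0.4]]:
# print(inverse_of_matrix([[4, 7], [2, 6]]))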
| 67 | 0 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
logger = logging.get_logger(__name__)
def normalize_box(box, width, height):
    """Scale a pixel-space bounding box onto the 0-1000 grid used by LayoutLM-style models."""
    return [
        int(1_000 * (box[0] / width)),
        int(1_000 * (box[1] / height)),
        int(1_000 * (box[2] / width)),
        int(1_000 * (box[3] / height)),
    ]
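# Example (illustrative): a box (10, 20, 30, 40) in a 100x200 image maps onto
# the 0-1000 coordinate scale:
#   normalize_box([10, 20, 30, 40], 100, 200) -> [100, 100, 300, 200]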
def apply_tesseract(image, lang, tesseract_config):
    """Apply Tesseract OCR on a document image and return recognized words + normalized bounding boxes."""
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 224, "width": 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}")
        output_size = (size["height"], size["width"])
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, Iterable[float]],
        std: Union[float, Iterable[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Union[float, Iterable[float]] = None,
        image_std: Union[float, Iterable[float]] = None,
        apply_ocr: bool = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("If do_normalize is True, image_mean and image_std must be specified.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, "pytesseract")
            words_batch = []
            boxes_batch = []
            for image in images:
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={"pixel_values": images}, tensor_type=return_tensors)

        if apply_ocr:
            data["words"] = words_batch
            data["boxes"] = boxes_batch
        return data
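# Minimal usage sketch (requires pytesseract for the OCR step; the image path is
# a placeholder):
# from PIL import Image
# processor = LayoutLMv3ImageProcessor()
# encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
# print(encoding["pixel_values"].shape, encoding["words"][0][:5])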
| 28 | '''simple docstring'''
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
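# Note on list_field: dataclasses reject mutable defaults such as `default=[]`,
# so a default_factory is used to give every instance its own fresh list.
# Illustrative check (not part of the original module):
# a, b = BenchmarkArguments(), BenchmarkArguments()
# assert a.batch_sizes == [8] and a.batch_sizes is not b.batch_sizes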
@dataclass
class BenchmarkArguments:
    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )

    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
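# Illustrative construction (model name is a placeholder; framework-specific
# subclasses such as PyTorchBenchmarkArguments supply the `is_tpu` attribute
# that `do_multi_processing` relies on):
# args = BenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[1], sequence_lengths=[32])
# print(args.model_names)  # ['bert-base-uncased']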
| 67 | 0 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "The Nymphenburg Palace is a beautiful palace in Munich!"
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str, pytorch_dump_folder_path: str):
    """Convert the original Bort checkpoint (based on MXNET and Gluonnlp) to the HF BERT structure."""
    bort_4_8_768_1024_hparams = {
        "attention_cell": "multi_head",
        "num_layers": 4,
        "units": 1024,
        "hidden_size": 768,
        "max_length": 512,
        "num_heads": 8,
        "scaled": True,
        "dropout": 0.1,
        "use_residual": True,
        "embed_size": 1024,
        "embed_dropout": 0.1,
        "word_embed": None,
        "layer_norm_eps": 1e-5,
        "token_type_vocab_size": 2,
    }

    predefined_args = bort_4_8_768_1024_hparams

    # Let's construct the original Bort model here
    # Taken from official BERT implementation, see:
    # https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"],
        num_layers=predefined_args["num_layers"],
        units=predefined_args["units"],
        hidden_size=predefined_args["hidden_size"],
        max_length=predefined_args["max_length"],
        num_heads=predefined_args["num_heads"],
        scaled=predefined_args["scaled"],
        dropout=predefined_args["dropout"],
        output_attention=False,
        output_all_encodings=False,
        use_residual=predefined_args["use_residual"],
        activation=predefined_args.get("activation", "gelu"),
        layer_norm_eps=predefined_args.get("layer_norm_eps", None),
    )

    # Vocab information needs to be fetched first
    # It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"

    # Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir(), "models")
    bort_vocab = _load_vocab(vocab_name, None, gluon_cache_dir, cls=Vocab)

    original_bort = nlp.model.BERTModel(
        encoder,
        len(bort_vocab),
        units=predefined_args["units"],
        embed_size=predefined_args["embed_size"],
        embed_dropout=predefined_args["embed_dropout"],
        word_embed=predefined_args["word_embed"],
        use_pooler=False,
        use_token_type_embed=False,
        token_type_vocab_size=predefined_args["token_type_vocab_size"],
        use_classifier=False,
        use_decoder=False,
    )

    original_bort.load_parameters(bort_checkpoint_path, cast_dtype=True, ignore_extra=True)
    params = original_bort._collect_params_with_prefix()

    # Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1,  # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1,  # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab),
    }

    hf_bort_config = BertConfig.from_dict(hf_bort_config_json)
    hf_bort_model = BertForMaskedLM(hf_bort_config)
    hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
    # Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy()))

    # Check param shapes and map new HF param back
    def check_and_map_params(hf_param, gluon_param):
        shape_hf = hf_param.shape

        gluon_param = to_torch(params[gluon_param])
        shape_gluon = gluon_param.shape

        assert (
            shape_hf == shape_gluon
        ), f"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"

        return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight, "word_embed.0.weight"
    )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight, "encoder.position_weight"
    )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias, "encoder.layer_norm.beta"
    )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight, "encoder.layer_norm.gamma"
    )

    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data
    )
    for i in range(hf_bort_config.num_hidden_layers):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]

        # self attention
        self_attn: BertSelfAttention = layer.attention.self

        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.bias"
        )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_key.weight"
        )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.bias"
        )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_query.weight"
        )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.bias"
        )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data, f"encoder.transformer_cells.{i}.attention_cell.proj_value.weight"
        )

        # self attention output
        self_output: BertSelfOutput = layer.attention.output

        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias, f"encoder.transformer_cells.{i}.proj.bias"
        )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight, f"encoder.transformer_cells.{i}.proj.weight"
        )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.layer_norm.beta"
        )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.layer_norm.gamma"
        )

        # intermediate
        intermediate: BertIntermediate = layer.intermediate

        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_1.bias"
        )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_1.weight"
        )

        # output
        bert_output: BertOutput = layer.output

        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias, f"encoder.transformer_cells.{i}.ffn.ffn_2.bias"
        )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight, f"encoder.transformer_cells.{i}.ffn.ffn_2.weight"
        )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias, f"encoder.transformer_cells.{i}.ffn.layer_norm.beta"
        )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight, f"encoder.transformer_cells.{i}.ffn.layer_norm.gamma"
        )
    # Save space and energy 🎄
    hf_bort_model.half()

    # Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base")

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT)["input_ids"]

    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids])
    output_gluon = original_bort(inputs=gluon_input_ids, token_types=[])

    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path)
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path)
    hf_bort_model.eval()

    input_ids = tokenizer.encode_plus(SAMPLE_TEXT, return_tensors="pt")
    output_hf = hf_bort_model(**input_ids)[0]

    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()

    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer)).item()
    success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

    if success:
        print("✔️ Both model do output the same tensors")
    else:
        print("❌ Both model do **NOT** output the same tensors")
        print("Absolute difference is:", max_absolute_diff)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 175 | '''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_barthez import BarthezTokenizer
else:
__UpperCAmelCase =None
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase ={
"vocab_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/tokenizer.json",
"moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/tokenizer.json",
"moussaKam/barthez-orangesum-title": (
"https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase ={
"moussaKam/mbarthez": 1_0_2_4,
"moussaKam/barthez": 1_0_2_4,
"moussaKam/barthez-orangesum-title": 1_0_2_4,
}
__UpperCAmelCase ="▁"
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[int] =VOCAB_FILES_NAMES
lowerCamelCase : Optional[Any] =PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : int =PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[Any] =["input_ids", "attention_mask"]
lowerCamelCase : Union[str, Any] =BarthezTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , **kwargs ):
        """simple docstring"""
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file , tokenizer_file=tokenizer_file , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , **kwargs , )
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = bool(self.vocab_file )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None ):
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
__lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : List[int] , a : Optional[List[int]] = None ):
"""simple docstring"""
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : str , a : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(a ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase = os.path.join(
a , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,)
| 67 | 0 |
import inspect
from typing import Optional, Union
import numpy as np
import PIL
import torch
from torch.nn import functional as F
from torchvision import transforms
from transformers import CLIPFeatureExtractor, CLIPModel, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
DPMSolverMultistepScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.utils import (
PIL_INTERPOLATION,
randn_tensor,
)
def preprocess( image , w , h ):
    """simple docstring"""
    if isinstance(image , torch.Tensor ):
        return image
    elif isinstance(image , PIL.Image.Image ):
        image = [image]
    if isinstance(image[0] , PIL.Image.Image ):
        image = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["lanczos"] ) )[None, :] for i in image]
        image = np.concatenate(image , axis=0 )
        image = np.array(image ).astype(np.float32 ) / 255.0
        image = image.transpose(0 , 3 , 1 , 2 )
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image )
    elif isinstance(image[0] , torch.Tensor ):
        image = torch.cat(image , dim=0 )
    return image
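# Spherical linear interpolation (slerp): interpolates along the great-circle arc between
# two vectors rather than the straight line, which behaves better for latents/embeddings.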
def slerp( t , va , vb , DOT_THRESHOLD=0.9_9_9_5 ):
    """simple docstring"""
    inputs_are_torch = False  # must be initialized for the pure-numpy path
    if not isinstance(va , np.ndarray ):
        inputs_are_torch = True
        input_device = va.device
        va = va.cpu().numpy()
        vb = vb.cpu().numpy()
    dot = np.sum(va * vb / (np.linalg.norm(va ) * np.linalg.norm(vb )) )
    if np.abs(dot ) > DOT_THRESHOLD:
        # nearly colinear inputs make sin(theta) ~ 0, so fall back to plain lerp
        result = (1 - t) * va + t * vb
    else:
        theta_a = np.arccos(dot )
        sin_theta_a = np.sin(theta_a )
        theta_t = theta_a * t
        sin_theta_t = np.sin(theta_t )
        sa = np.sin(theta_a - theta_t ) / sin_theta_a
        sb = sin_theta_t / sin_theta_a
        result = sa * va + sb * vb
    if inputs_are_torch:
        result = torch.from_numpy(result ).to(input_device )
    return result
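# Spherical distance loss between L2-normalized embeddings:
# 2 * arcsin(||x - y|| / 2)^2, i.e. half the squared geodesic distance on the unit sphere.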
def spherical_dist_loss( x , y ):
    """simple docstring"""
    x = F.normalize(x , dim=-1 )
    y = F.normalize(y , dim=-1 )
    return (x - y).norm(dim=-1 ).div(2 ).arcsin().pow(2 ).mul(2 )
def set_requires_grad( model , value ):
    """simple docstring"""
    for param in model.parameters():
        param.requires_grad = value
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__(self : int , UpperCAmelCase_ : AutoencoderKL , UpperCAmelCase_ : CLIPTextModel , UpperCAmelCase_ : CLIPModel , UpperCAmelCase_ : CLIPTokenizer , UpperCAmelCase_ : UNetaDConditionModel , UpperCAmelCase_ : Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler] , UpperCAmelCase_ : CLIPFeatureExtractor , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : int=None , ) ->str:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=UpperCAmelCase_ , text_encoder=UpperCAmelCase_ , clip_model=UpperCAmelCase_ , tokenizer=UpperCAmelCase_ , unet=UpperCAmelCase_ , scheduler=UpperCAmelCase_ , feature_extractor=UpperCAmelCase_ , coca_model=UpperCAmelCase_ , coca_tokenizer=UpperCAmelCase_ , coca_transform=UpperCAmelCase_ , )
lowerCamelCase__: List[str] =(
feature_extractor.size
if isinstance(feature_extractor.size , UpperCAmelCase_)
else feature_extractor.size["shortest_edge"]
)
lowerCamelCase__: List[Any] =transforms.Normalize(mean=feature_extractor.image_mean , std=feature_extractor.image_std)
set_requires_grad(self.text_encoder , UpperCAmelCase_)
set_requires_grad(self.clip_model , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Optional[Union[str, int]] = "auto") ->List[str]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
lowerCamelCase__: Optional[Any] =self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Any:
'''simple docstring'''
self.enable_attention_slicing(UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Optional[Any]) ->Any:
'''simple docstring'''
set_requires_grad(self.vae , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Any) ->Tuple:
'''simple docstring'''
set_requires_grad(self.vae , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int) ->Dict:
'''simple docstring'''
set_requires_grad(self.unet , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : Union[str, Any]) ->Dict:
'''simple docstring'''
set_requires_grad(self.unet , UpperCAmelCase_)
def SCREAMING_SNAKE_CASE_ (self : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Tuple) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =min(int(num_inference_steps * strength) , UpperCAmelCase_)
lowerCamelCase__: Dict =max(num_inference_steps - init_timestep , 0)
lowerCamelCase__: Tuple =self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def SCREAMING_SNAKE_CASE_ (self : Any , UpperCAmelCase_ : int , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : str , UpperCAmelCase_ : int=None) ->Any:
'''simple docstring'''
if not isinstance(UpperCAmelCase_ , torch.Tensor):
raise ValueError(F"""`image` has to be of type `torch.Tensor` but is {type(UpperCAmelCase_)}""")
lowerCamelCase__: List[Any] =image.to(device=UpperCAmelCase_ , dtype=UpperCAmelCase_)
if isinstance(UpperCAmelCase_ , UpperCAmelCase_):
lowerCamelCase__: int =[
self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(UpperCAmelCase_)
]
lowerCamelCase__: Union[str, Any] =torch.cat(UpperCAmelCase_ , dim=0)
else:
lowerCamelCase__: Any =self.vae.encode(UpperCAmelCase_).latent_dist.sample(UpperCAmelCase_)
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowerCamelCase__: Optional[int] =0.1_8215 * init_latents
lowerCamelCase__: Tuple =init_latents.repeat_interleave(UpperCAmelCase_ , dim=0)
lowerCamelCase__: Tuple =randn_tensor(init_latents.shape , generator=UpperCAmelCase_ , device=UpperCAmelCase_ , dtype=UpperCAmelCase_)
# get latents
lowerCamelCase__: Any =self.scheduler.add_noise(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: List[str] =init_latents
return latents
def SCREAMING_SNAKE_CASE_ (self : Optional[Any] , UpperCAmelCase_ : List[str]) ->List[Any]:
'''simple docstring'''
lowerCamelCase__: Tuple =self.coca_transform(UpperCAmelCase_).unsqueeze(0)
with torch.no_grad(), torch.cuda.amp.autocast():
lowerCamelCase__: Optional[Any] =self.coca_model.generate(transformed_image.to(device=self.device , dtype=self.coca_model.dtype))
lowerCamelCase__: Optional[Any] =self.coca_tokenizer.decode(generated[0].cpu().numpy())
return generated.split("<end_of_text>")[0].replace("<start_of_text>" , "").rstrip(" .,")
def SCREAMING_SNAKE_CASE_ (self : str , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int]) ->Optional[int]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =self.feature_extractor.preprocess(UpperCAmelCase_)
lowerCamelCase__: List[str] =torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
lowerCamelCase__: List[str] =self.clip_model.get_image_features(UpperCAmelCase_)
lowerCamelCase__: int =image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =image_embeddings_clip.repeat_interleave(UpperCAmelCase_ , dim=0)
return image_embeddings_clip
@torch.enable_grad()
def SCREAMING_SNAKE_CASE_ (self : Dict , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : str , UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[str] , ) ->Any:
'''simple docstring'''
lowerCamelCase__: str =latents.detach().requires_grad_()
lowerCamelCase__: Dict =self.scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_)
# predict the noise residual
lowerCamelCase__: str =self.unet(UpperCAmelCase_ , UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_).sample
if isinstance(self.scheduler , (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
lowerCamelCase__: Union[str, Any] =self.scheduler.alphas_cumprod[timestep]
lowerCamelCase__: List[Any] =1 - alpha_prod_t
# compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCamelCase__: Optional[int] =(latents - beta_prod_t ** 0.5 * noise_pred) / alpha_prod_t ** 0.5
lowerCamelCase__: str =torch.sqrt(UpperCAmelCase_)
lowerCamelCase__: Any =pred_original_sample * (fac) + latents * (1 - fac)
elif isinstance(self.scheduler , UpperCAmelCase_):
lowerCamelCase__: Dict =self.scheduler.sigmas[index]
lowerCamelCase__: Optional[Any] =latents - sigma * noise_pred
else:
raise ValueError(F"""scheduler type {type(self.scheduler)} not supported""")
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowerCamelCase__: Optional[int] =1 / 0.1_8215 * sample
lowerCamelCase__: int =self.vae.decode(UpperCAmelCase_).sample
lowerCamelCase__: Union[str, Any] =(image / 2 + 0.5).clamp(0 , 1)
lowerCamelCase__: List[str] =transforms.Resize(self.feature_extractor_size)(UpperCAmelCase_)
lowerCamelCase__: str =self.normalize(UpperCAmelCase_).to(latents.dtype)
lowerCamelCase__: Optional[int] =self.clip_model.get_image_features(UpperCAmelCase_)
lowerCamelCase__: int =image_embeddings_clip / image_embeddings_clip.norm(p=2 , dim=-1 , keepdim=UpperCAmelCase_)
lowerCamelCase__: str =spherical_dist_loss(UpperCAmelCase_ , UpperCAmelCase_).mean() * clip_guidance_scale
lowerCamelCase__: str =-torch.autograd.grad(UpperCAmelCase_ , UpperCAmelCase_)[0]
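        # the gradient of the CLIP loss w.r.t. the latents steers denoising toward the target
        # embedding (classifier-style guidance); the minus sign makes the update descend the loss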
if isinstance(self.scheduler , UpperCAmelCase_):
lowerCamelCase__: Tuple =latents.detach() + grads * (sigma**2)
lowerCamelCase__: str =noise_pred_original
else:
lowerCamelCase__: Dict =noise_pred_original - torch.sqrt(UpperCAmelCase_) * grads
return noise_pred, latents
@torch.no_grad()
def __call__(self : Union[str, Any] , UpperCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] , UpperCAmelCase_ : Union[torch.FloatTensor, PIL.Image.Image] , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[str] = None , UpperCAmelCase_ : Optional[int] = 512 , UpperCAmelCase_ : Optional[int] = 512 , UpperCAmelCase_ : float = 0.6 , UpperCAmelCase_ : Optional[int] = 50 , UpperCAmelCase_ : Optional[float] = 7.5 , UpperCAmelCase_ : Optional[int] = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : Optional[float] = 100 , UpperCAmelCase_ : Optional[torch.Generator] = None , UpperCAmelCase_ : Optional[str] = "pil" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : float = 0.8 , UpperCAmelCase_ : float = 0.1 , UpperCAmelCase_ : float = 0.1 , ) ->Union[str, Any]:
'''simple docstring'''
if isinstance(UpperCAmelCase_ , UpperCAmelCase_) and len(UpperCAmelCase_) != batch_size:
raise ValueError(F"""You have passed {batch_size} batch_size, but only {len(UpperCAmelCase_)} generators.""")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""")
if isinstance(UpperCAmelCase_ , torch.Generator) and batch_size > 1:
lowerCamelCase__: Any =[generator] + [None] * (batch_size - 1)
lowerCamelCase__: int =[
("model", self.coca_model is None),
("tokenizer", self.coca_tokenizer is None),
("transform", self.coca_transform is None),
]
lowerCamelCase__: Tuple =[x[0] for x in coca_is_none if x[1]]
lowerCamelCase__: List[Any] =", ".join(UpperCAmelCase_)
# generate prompts with coca model if prompt is None
if content_prompt is None:
if len(UpperCAmelCase_):
                raise ValueError(
                    F"""Content prompt is None and CoCa [{coca_is_none_str}] is None."""
                    F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""")
lowerCamelCase__: List[Any] =self.get_image_description(UpperCAmelCase_)
if style_prompt is None:
if len(UpperCAmelCase_):
raise ValueError(
F"""Style prompt is None and CoCa [{coca_is_none_str}] is None."""
F""" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline.""")
lowerCamelCase__: Optional[int] =self.get_image_description(UpperCAmelCase_)
# get prompt text embeddings for content and style
lowerCamelCase__: str =self.tokenizer(
UpperCAmelCase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=UpperCAmelCase_ , return_tensors="pt" , )
lowerCamelCase__: int =self.text_encoder(content_text_input.input_ids.to(self.device))[0]
lowerCamelCase__: int =self.tokenizer(
UpperCAmelCase_ , padding="max_length" , max_length=self.tokenizer.model_max_length , truncation=UpperCAmelCase_ , return_tensors="pt" , )
lowerCamelCase__: Dict =self.text_encoder(style_text_input.input_ids.to(self.device))[0]
lowerCamelCase__: int =slerp(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# duplicate text embeddings for each generation per prompt
lowerCamelCase__: int =text_embeddings.repeat_interleave(UpperCAmelCase_ , dim=0)
# set timesteps
lowerCamelCase__: Optional[Any] ="offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
lowerCamelCase__: str ={}
if accepts_offset:
lowerCamelCase__: Optional[int] =1
self.scheduler.set_timesteps(UpperCAmelCase_ , **UpperCAmelCase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
        self.scheduler.timesteps = self.scheduler.timesteps.to(self.device)  # Tensor.to() is not in-place; assign the result back
lowerCamelCase__ , lowerCamelCase__: List[Any] =self.get_timesteps(UpperCAmelCase_ , UpperCAmelCase_ , self.device)
lowerCamelCase__: Union[str, Any] =timesteps[:1].repeat(UpperCAmelCase_)
# Preprocess image
lowerCamelCase__: Tuple =preprocess(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: List[str] =self.prepare_latents(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , text_embeddings.dtype , self.device , UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =preprocess(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: str =self.prepare_latents(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , text_embeddings.dtype , self.device , UpperCAmelCase_)
lowerCamelCase__: Dict =slerp(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
if clip_guidance_scale > 0:
lowerCamelCase__: Union[str, Any] =self.get_clip_image_embeddings(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: str =self.get_clip_image_embeddings(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =slerp(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
lowerCamelCase__: Optional[Any] =guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase__: Union[str, Any] =content_text_input.input_ids.shape[-1]
lowerCamelCase__: Any =self.tokenizer([""] , padding="max_length" , max_length=UpperCAmelCase_ , return_tensors="pt")
lowerCamelCase__: int =self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt
lowerCamelCase__: Optional[Any] =uncond_embeddings.repeat_interleave(UpperCAmelCase_ , dim=0)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowerCamelCase__: Tuple =torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
lowerCamelCase__: Union[str, Any] =(batch_size, self.unet.config.in_channels, height // 8, width // 8)
lowerCamelCase__: Any =text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not work reproducibly on mps
lowerCamelCase__: Dict =torch.randn(UpperCAmelCase_ , generator=UpperCAmelCase_ , device="cpu" , dtype=UpperCAmelCase_).to(
self.device)
else:
lowerCamelCase__: int =torch.randn(UpperCAmelCase_ , generator=UpperCAmelCase_ , device=self.device , dtype=UpperCAmelCase_)
else:
if latents.shape != latents_shape:
raise ValueError(F"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""")
lowerCamelCase__: Any =latents.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
lowerCamelCase__: Optional[int] =latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCamelCase__: Union[str, Any] ="eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
lowerCamelCase__: Any ={}
if accepts_eta:
lowerCamelCase__: Tuple =eta
# check if the scheduler accepts generator
lowerCamelCase__: int ="generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
if accepts_generator:
lowerCamelCase__: Tuple =generator
with self.progress_bar(total=UpperCAmelCase_):
for i, t in enumerate(UpperCAmelCase_):
# expand the latents if we are doing classifier free guidance
lowerCamelCase__: Any =torch.cat([latents] * 2) if do_classifier_free_guidance else latents
lowerCamelCase__: Optional[int] =self.scheduler.scale_model_input(UpperCAmelCase_ , UpperCAmelCase_)
# predict the noise residual
lowerCamelCase__: str =self.unet(UpperCAmelCase_ , UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_).sample
# perform classifier free guidance
if do_classifier_free_guidance:
lowerCamelCase__ , lowerCamelCase__: Union[str, Any] =noise_pred.chunk(2)
lowerCamelCase__: Optional[int] =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# perform clip guidance
if clip_guidance_scale > 0:
lowerCamelCase__: List[str] =(
text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
)
lowerCamelCase__ , lowerCamelCase__: Dict =self.cond_fn(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , )
# compute the previous noisy sample x_t -> x_t-1
lowerCamelCase__: Optional[Any] =self.scheduler.step(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , **UpperCAmelCase_).prev_sample
# Hardcode 0.18215 because stable-diffusion-2-base has not self.vae.config.scaling_factor
lowerCamelCase__: List[str] =1 / 0.1_8215 * latents
lowerCamelCase__: Tuple =self.vae.decode(UpperCAmelCase_).sample
lowerCamelCase__: int =(image / 2 + 0.5).clamp(0 , 1)
lowerCamelCase__: Dict =image.cpu().permute(0 , 2 , 3 , 1).numpy()
if output_type == "pil":
lowerCamelCase__: Optional[Any] =self.numpy_to_pil(UpperCAmelCase_)
if not return_dict:
return (image, None)
return StableDiffusionPipelineOutput(images=UpperCAmelCase_ , nsfw_content_detected=UpperCAmelCase_)
| 10 | '''simple docstring'''
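# Totient sum: sieve the odd primes, then apply Euler's product formula
# phi(n) = n * prod_{p | n} (1 - 1 / p) in place over the whole range.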
def solution( limit = 1_00_00_00 ) -> int:
    primes = set(range(3 , limit , 2 ) )
    primes.add(2 )
    for p in range(3 , limit , 2 ):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p , limit , p ) ) )
    phi = [float(n ) for n in range(limit + 1 )]
    for p in primes:
        for n in range(p , limit + 1 , p ):
            phi[n] *= 1 - 1 / p
    return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'{solution() = }')
| 67 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _a ( SCREAMING_SNAKE_CASE : Any ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase: int = [2, 2, 6, 2] if 'tiny' in model_name else [2, 2, 18, 2]
__lowerCAmelCase: Union[str, Any] = True if 'large' in model_name or 'huge' in model_name else False
__lowerCAmelCase: Union[str, Any] = True if 'large' in model_name or 'huge' in model_name else False
__lowerCAmelCase: int = True if 'large' in model_name or 'huge' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
__lowerCAmelCase: str = [3, 3, 3, 3]
__lowerCAmelCase: Union[str, Any] = [5, 5, 5, 5]
elif "fl4" in model_name:
__lowerCAmelCase: Tuple = [4, 4, 4, 4]
__lowerCAmelCase: Dict = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
__lowerCAmelCase: Any = [3, 3, 3, 3]
if "lrf" in model_name:
__lowerCAmelCase: List[str] = [3, 3, 3, 3]
else:
__lowerCAmelCase: Optional[Any] = [2, 2, 2, 2]
if "tiny" in model_name:
__lowerCAmelCase: Any = 96
elif "small" in model_name:
__lowerCAmelCase: str = 96
elif "base" in model_name:
__lowerCAmelCase: Optional[int] = 1_28
elif "large" in model_name:
__lowerCAmelCase: Any = 1_92
elif "xlarge" in model_name:
__lowerCAmelCase: Dict = 2_56
elif "huge" in model_name:
__lowerCAmelCase: Tuple = 3_52
# set label information
__lowerCAmelCase: Optional[int] = 'huggingface/label-files'
if "large" in model_name or "huge" in model_name:
__lowerCAmelCase: Any = 'imagenet-22k-id2label.json'
else:
__lowerCAmelCase: int = 'imagenet-1k-id2label.json'
__lowerCAmelCase: int = json.load(open(hf_hub_download(UpperCamelCase__ , UpperCamelCase__ , repo_type='dataset' ) , 'r' ) )
__lowerCAmelCase: str = {int(UpperCamelCase__ ): v for k, v in idalabel.items()}
__lowerCAmelCase: List[str] = {v: k for k, v in idalabel.items()}
__lowerCAmelCase: Any = FocalNetConfig(
embed_dim=UpperCamelCase__ , depths=UpperCamelCase__ , focal_levels=UpperCamelCase__ , focal_windows=UpperCamelCase__ , use_conv_embed=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid=UpperCamelCase__ , use_post_layernorm=UpperCamelCase__ , use_layerscale=UpperCamelCase__ , )
return config
def rename_key( name ):
    """simple docstring"""
    if "patch_embed.proj" in name:
        name = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
    if "patch_embed.norm" in name:
        name = name.replace('patch_embed.norm' , 'embeddings.norm' )
    if "layers" in name:
        name = 'encoder.' + name
    if "encoder.layers" in name:
        name = name.replace('encoder.layers' , 'encoder.stages' )
    if "downsample.proj" in name:
        name = name.replace('downsample.proj' , 'downsample.projection' )
    if "blocks" in name:
        name = name.replace('blocks' , 'layers' )
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace('modulation.f' , 'modulation.projection_in' )
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace('modulation.h' , 'modulation.projection_context' )
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace('modulation.proj' , 'modulation.projection_out' )
    if name == "norm.weight":
        name = 'layernorm.weight'
    if name == "norm.bias":
        name = 'layernorm.bias'
    if "head" in name:
        name = name.replace('head' , 'classifier' )
    else:
        name = 'focalnet.' + name
    return name
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any , SCREAMING_SNAKE_CASE : Dict=False ) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase: Union[str, Any] = {
'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
}
# fmt: on
__lowerCAmelCase: Tuple = model_name_to_url[model_name]
print('Checkpoint URL: ' , UpperCamelCase__ )
__lowerCAmelCase: List[Any] = torch.hub.load_state_dict_from_url(UpperCamelCase__ , map_location='cpu' )['model']
# rename keys
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        state_dict[rename_key(key )] = val  # remap to the HF naming scheme (rename_key defined above)
__lowerCAmelCase: Tuple = get_focalnet_config(UpperCamelCase__ )
__lowerCAmelCase: Any = FocalNetForImageClassification(UpperCamelCase__ )
model.eval()
# load state dict
model.load_state_dict(UpperCamelCase__ )
# verify conversion
__lowerCAmelCase: List[str] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCAmelCase: List[str] = BitImageProcessor(
do_resize=UpperCamelCase__ , size={'shortest_edge': 2_56} , resample=PILImageResampling.BILINEAR , do_center_crop=UpperCamelCase__ , crop_size=2_24 , do_normalize=UpperCamelCase__ , image_mean=UpperCamelCase__ , image_std=UpperCamelCase__ , )
__lowerCAmelCase: Optional[int] = Image.open(requests.get(UpperCamelCase__ , stream=UpperCamelCase__ ).raw )
__lowerCAmelCase: Dict = processor(images=UpperCamelCase__ , return_tensors='pt' )
__lowerCAmelCase: Tuple = transforms.Compose(
[
transforms.Resize(2_56 ),
transforms.CenterCrop(2_24 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.4_8_5, 0.4_5_6, 0.4_0_6] , std=[0.2_2_9, 0.2_2_4, 0.2_2_5] ),
] )
__lowerCAmelCase: Optional[Any] = image_transforms(UpperCamelCase__ ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , UpperCamelCase__ , atol=1E-4 )
__lowerCAmelCase: List[Any] = model(**UpperCamelCase__ )
__lowerCAmelCase: Optional[int] = outputs.logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
print('First values of logits:' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
__lowerCAmelCase: Any = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] )
elif model_name == "focalnet-tiny-lrf":
__lowerCAmelCase: Dict = torch.tensor([1.1_6_6_9, 0.0_1_2_5, -0.1_6_9_5] )
elif model_name == "focalnet-small":
__lowerCAmelCase: str = torch.tensor([0.4_9_1_7, -0.0_4_3_0, 0.1_3_4_1] )
elif model_name == "focalnet-small-lrf":
__lowerCAmelCase: List[str] = torch.tensor([-0.2_5_8_8, -0.5_3_4_2, -0.2_3_3_1] )
elif model_name == "focalnet-base":
__lowerCAmelCase: Union[str, Any] = torch.tensor([-0.1_6_5_5, -0.4_0_9_0, -0.1_7_3_0] )
elif model_name == "focalnet-base-lrf":
__lowerCAmelCase: int = torch.tensor([0.5_3_0_6, -0.0_4_8_3, -0.3_9_2_8] )
assert torch.allclose(outputs.logits[0, :3] , UpperCamelCase__ , atol=1E-4 )
print('Looks ok!' )
if pytorch_dump_folder_path is not None:
print(f'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(UpperCamelCase__ )
processor.save_pretrained(UpperCamelCase__ )
if push_to_hub:
print(f'''Pushing model and processor of {model_name} to the hub...''' )
model.push_to_hub(f'''{model_name}''' )
processor.push_to_hub(f'''{model_name}''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''focalnet-tiny''',
type=str,
help='''Name of the FocalNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub.''',
)
    args = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 322 | '''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class a__ :
def __init__( self : Union[str, Any] , a : Union[str, Any] , a : Tuple=13 , a : Optional[Any]=7 , a : List[Any]=True , a : Optional[Any]=True , a : Any=True , a : Union[str, Any]=99 , a : Any=32 , a : int=5 , a : Optional[int]=4 , a : Union[str, Any]=37 , a : Optional[Any]="gelu" , a : Union[str, Any]=0.1 , a : Any=0.1 , a : Optional[int]=5_12 , a : int=16 , a : Optional[Any]=2 , a : Union[str, Any]=0.02 , a : Any=3 , a : Dict=4 , a : Any=None , ):
"""simple docstring"""
__lowerCamelCase = parent
__lowerCamelCase = batch_size
__lowerCamelCase = seq_length
__lowerCamelCase = is_training
__lowerCamelCase = use_token_type_ids
__lowerCamelCase = use_labels
__lowerCamelCase = vocab_size
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_size
__lowerCamelCase = hidden_act
__lowerCamelCase = hidden_dropout_prob
__lowerCamelCase = attention_probs_dropout_prob
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = type_vocab_size
__lowerCamelCase = type_sequence_label_size
__lowerCamelCase = initializer_range
__lowerCamelCase = num_labels
__lowerCamelCase = num_choices
__lowerCamelCase = scope
__lowerCamelCase = self.vocab_size - 1
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCamelCase = None
if self.use_token_type_ids:
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCamelCase = None
__lowerCamelCase = None
__lowerCamelCase = None
if self.use_labels:
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCamelCase = ids_tensor([self.batch_size] , self.num_choices )
__lowerCamelCase = OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
__lowerCamelCase = ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : Dict , a : List[str] , a : Tuple , a : List[Any] , *a : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTModel(config=a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , head_mask=a )
__lowerCamelCase = model(a , token_type_ids=a )
__lowerCamelCase = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Union[str, Any] , a : Dict , a : Union[str, Any] , a : Tuple , *a : Union[str, Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTLMHeadModel(a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : Tuple , a : Optional[int] , a : Union[str, Any] , a : Optional[Any] , *a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = OpenAIGPTDoubleHeadsModel(a )
model.to(a )
model.eval()
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , a : int , a : Dict , a : Optional[Any] , a : str , *a : int ):
"""simple docstring"""
__lowerCamelCase = self.num_labels
__lowerCamelCase = OpenAIGPTForSequenceClassification(a )
model.to(a )
model.eval()
__lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCamelCase = model(a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            '''input_ids''': input_ids,
            '''token_type_ids''': token_type_ids,
            '''head_mask''': head_mask,
        }
        return config, inputs_dict
@require_torch
class a__ ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
lowerCamelCase : List[str] =(
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
lowerCamelCase : str =(
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
lowerCamelCase : Optional[int] =(
{
"feature-extraction": OpenAIGPTModel,
"text-classification": OpenAIGPTForSequenceClassification,
"text-generation": OpenAIGPTLMHeadModel,
"zero-shot": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , a : Tuple , a : Optional[int] , a : int , a : str , a : Any ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , a : int , a : Optional[int] , a : str=False ):
"""simple docstring"""
__lowerCamelCase = super()._prepare_for_class(a , a , return_labels=a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
__lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=a , )
__lowerCamelCase = inputs_dict['''labels''']
__lowerCamelCase = inputs_dict['''labels''']
__lowerCamelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=a , )
__lowerCamelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=a )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
        self.model_tester = OpenAIGPTModelTester(self )
        self.config_tester = ConfigTester(self , config_class=OpenAIGPTConfig , n_embd=37 )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_model(*config_and_inputs )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head_model(*config_and_inputs )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_double_lm_head_model(*config_and_inputs )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*config_and_inputs )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = OpenAIGPTModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class a__ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
        model = OpenAIGPTLMHeadModel.from_pretrained('''openai-gpt''' )
        model.to(torch_device )
        input_ids = torch.tensor([[4_81, 47_35, 5_44]] , dtype=torch.long , device=torch_device )  # the president is
        expected_output_ids = [
4_81,
47_35,
5_44,
2_46,
9_63,
8_70,
7_62,
2_39,
2_44,
4_04_77,
2_44,
2_49,
7_19,
8_81,
4_87,
5_44,
2_40,
2_44,
6_03,
4_81,
] # the president is a very good man. " \n " i\'m sure he is, " said the
        output_ids = model.generate(input_ids , do_sample=False )
        self.assertListEqual(output_ids[0].tolist() , expected_output_ids )
| 67 | 0 |
'''simple docstring'''
from __future__ import annotations
UpperCamelCase__ = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
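# Grid path search: expand cells in increasing f = g + heuristic, record the move taken
# into each cell in `action`, then walk `action` backwards from the goal to recover the path.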
def search( grid , init , goal , cost , heuristic ) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0] ) )] for row in range(len(grid ) )
    ]  # the action grid
    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]
    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand
    while not found and not resign:
        if len(cell ) == 0:
            raise ValueError('''Algorithm is unable to find solution''' )
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]
            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS ) ):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid ) and ya >= 0 and ya < len(grid[0] ):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya] )
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y] )  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y] )
    path = []
    for i in range(len(invpath ) ):
        path.append(invpath[len(invpath ) - 1 - i] )
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 9_9
    path , action = search(grid, init, goal, cost, heuristic)
print('''ACTION MAP''')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 181 | '''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[int] =["image_processor", "tokenizer"]
lowerCamelCase : Union[str, Any] ="LayoutLMv2ImageProcessor"
lowerCamelCase : int =("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Optional[int] , a : Any=None , a : Any=None , **a : Union[str, Any] ):
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a , )
__lowerCamelCase = kwargs.pop('''feature_extractor''' )
__lowerCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a , a )
def __call__( self : Tuple , a : Optional[int] , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , a : Union[List[List[int]], List[List[List[int]]]] = None , a : Optional[Union[List[int], List[List[int]]]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : Tuple , ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
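        # when apply_ocr=True the image processor supplies the words/boxes; otherwise the caller must pass them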
# first, apply the image processor
__lowerCamelCase = self.image_processor(images=a , return_tensors=a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['''words''']
        encoded_inputs = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , )
# add pixel values
        images = features.pop('''pixel_values''' )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs['''overflow_to_sample_mapping'''] )
        encoded_inputs['''image'''] = images
return encoded_inputs
    def SCREAMING_SNAKE_CASE__ ( self , images , overflow_to_sample_mapping ):
        """simple docstring"""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                '''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
                f""" {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}""" )
        return images_with_overflow
def SCREAMING_SNAKE_CASE__ ( self : List[str] , *a : Optional[Any] , **a : Union[str, Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*a , **a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , *a : Union[str, Any] , **a : Tuple ):
"""simple docstring"""
return self.tokenizer.decode(*a , **a )
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a , )
return self.image_processor
| 67 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
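# TF -> PyTorch conversion: build a GPT-2 config, load the TensorFlow weights into the
# PyTorch module, then serialize the weights and the config separately.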
def convert_gpta_checkpoint_to_pytorch( gpta_checkpoint_path , gpta_config_file , pytorch_dump_folder_path ) -> None:
    """simple docstring"""
    # Construct model
    if gpta_config_file == "":
        config = GPTaConfig()
    else:
        config = GPTaConfig.from_json_file(gpta_config_file )
    model = GPTaModel(config )
    # Load weights from numpy
    load_tf_weights_in_gpta(model , config , gpta_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
    print(F'''Save PyTorch model to {pytorch_weights_dump_path}''' )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F'''Save configuration file to {pytorch_config_dump_path}''' )
    with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--gpt2_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--gpt2_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained OpenAI model. \n"""
"""This specifies the model architecture."""
),
)
    args = parser.parse_args()
    convert_gpta_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 75 | '''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
class a__ ( UpperCAmelCase__ ):
def __init__( self : Optional[Any] , a : Union[List[ControlNetModel], Tuple[ControlNetModel]] ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = nn.ModuleList(a )
def SCREAMING_SNAKE_CASE__ ( self : Any , a : torch.FloatTensor , a : Union[torch.Tensor, float, int] , a : torch.Tensor , a : List[torch.tensor] , a : List[float] , a : Optional[torch.Tensor] = None , a : Optional[torch.Tensor] = None , a : Optional[torch.Tensor] = None , a : Optional[Dict[str, Any]] = None , a : bool = False , a : bool = True , ):
"""simple docstring"""
for i, (image, scale, controlnet) in enumerate(zip(a , a , self.nets ) ):
__lowerCamelCase , __lowerCamelCase = controlnet(
a , a , a , a , a , a , a , a , a , a , a , )
# merge samples
if i == 0:
__lowerCamelCase , __lowerCamelCase = down_samples, mid_sample
else:
__lowerCamelCase = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(a , a )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def SCREAMING_SNAKE_CASE__ ( self : Any , a : Union[str, os.PathLike] , a : bool = True , a : Callable = None , a : bool = False , a : Optional[str] = None , ):
"""simple docstring"""
__lowerCamelCase = 0
__lowerCamelCase = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
a , is_main_process=a , save_function=a , safe_serialization=a , variant=a , )
idx += 1
__lowerCamelCase = model_path_to_save + f"""_{idx}"""
@classmethod
def SCREAMING_SNAKE_CASE__ ( cls : List[str] , a : Optional[Union[str, os.PathLike]] , **a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = 0
__lowerCamelCase = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_prertained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
__lowerCamelCase = pretrained_model_path
while os.path.isdir(a ):
__lowerCamelCase = ControlNetModel.from_pretrained(a , **a )
controlnets.append(a )
idx += 1
__lowerCamelCase = pretrained_model_path + f"""_{idx}"""
logger.info(f"""{len(a )} controlnets loaded from {pretrained_model_path}.""" )
if len(a ) == 0:
raise ValueError(
f"""No ControlNets found under {os.path.dirname(a )}. Expected at least {pretrained_model_path + '_0'}.""" )
return cls(a )
| 67 | 0 |
def temp_input_value( min_val = 10 , max_val = 1_000 , option = True ):
    # note: this helper's original name was obfuscated; `temp_input_value` is a reconstruction
    assert (
        isinstance(min_val , int )
        and isinstance(max_val , int )
        and isinstance(option , bool )
    ), "Invalid type of value(s) specified to function!"
    if min_val > max_val:
        raise ValueError('''Invalid value for min_val or max_val (min_value < max_value)''' )
    return min_val if option else max_val
def get_avg( number_a , number_b ):
    return int((number_a + number_b) / 2 )
def guess_the_number( lower , higher , to_guess ):
    assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be type of "int"'
if lower > higher:
raise ValueError('''argument value for lower and higher must be(lower > higher)''' )
if not lower < to_guess < higher:
raise ValueError(
'''guess value must be within the range of lower and higher value''' )
    def answer(number ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print('''started...''' )
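    # binary search: each midpoint guess halves [last_lowest, last_highest],
    # so the loop finishes in O(log(higher - lower)) guesses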
    last_lowest = lower
    last_highest = higher
    last_numbers = []
    while True:
        number = get_avg(last_lowest , last_highest )
        last_numbers.append(number )
        if answer(number ) == "low":
            last_lowest = number
        elif answer(number ) == "high":
            last_highest = number
        else:
            break
print(f"""guess the number : {last_numbers[-1]}""" )
print(f"""details : {last_numbers!s}""" )
def A ( ):
SCREAMING_SNAKE_CASE : List[str] = int(input('''Enter lower value : ''' ).strip() )
SCREAMING_SNAKE_CASE : int = int(input('''Enter high value : ''' ).strip() )
SCREAMING_SNAKE_CASE : str = int(input('''Enter value to guess : ''' ).strip() )
guess_the_number(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
if __name__ == "__main__":
main()
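A quick non-interactive check of the search above: each guess halves the remaining interval, so bounds of 1 and 1000 need at most about 10 guesses.

guess_the_number(1, 1_000, 17)  # prints the midpoint trace and ends on 17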
| 182 | '''simple docstring'''
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class DepthEstimationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(MODEL_FOR_DEPTH_ESTIMATION_MAPPING)

    def __call__(self, images: Union[str, List[str], "Image.Image", List["Image.Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        # no configurable preprocess/forward/postprocess parameters for this pipeline
        return {}, {}, {}

    def preprocess(self, image):
        image = load_image(image)
        self.image_size = image.size
        model_inputs = self.image_processor(images=image, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs):
        predicted_depth = model_outputs.predicted_depth
        # upsample the raw depth map back to the original image size; PIL gives (width, height),
        # interpolate expects (height, width), hence the [::-1]
        prediction = torch.nn.functional.interpolate(
            predicted_depth.unsqueeze(1), size=self.image_size[::-1], mode="bicubic", align_corners=False
        )
        output = prediction.squeeze().cpu().numpy()
        formatted = (output * 255 / np.max(output)).astype("uint8")
        depth = Image.fromarray(formatted)
        output_dict = {}
        output_dict["predicted_depth"] = predicted_depth
        output_dict["depth"] = depth
        return output_dict
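For context, a usage sketch of this pipeline through the public pipeline factory (the model id is illustrative):

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
result["depth"].save("depth.png")        # PIL image built in postprocess()
print(result["predicted_depth"].shape)   # raw model output tensor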
| 67 | 0 |
"""simple docstring"""
from __future__ import annotations
def p_series(nth_term: int | float | str, power: int | float | str) -> list[str]:
    """Return the first nth_term terms of the P-Series 1 + 1/2^p + 1/3^p + ... as strings."""
    if nth_term == "":
        return [""]
    nth_term = int(nth_term)
    power = int(power)
    series: list[str] = []
    for temp in range(int(nth_term)):
        series.append(f"1 / {pow(temp + 1, int(power))}" if series else "1")
    return series


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    nth_term = int(input('Enter the last number (nth term) of the P-Series'))
    power = int(input('Enter the power for P-Series'))
    print('Formula of P-Series => 1+1/2^p+1/3^p ..... 1/n^p')
    print(p_series(nth_term, power))
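For example, the first terms for p = 2:

print(p_series(5, 2))  # ['1', '1 / 4', '1 / 9', '1 / 16', '1 / 25']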
| 155 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase ={
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
__UpperCAmelCase =["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 0 |
from __future__ import annotations

import typing
from collections.abc import Iterable

import numpy as np

Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Vectorized L2 distance using numpy."""
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    """Pure-Python L2 distance for comparison."""
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])", number=10000, globals=globals(), ) )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])", number=10000, globals=globals(), ) )

    benchmark()
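Both variants agree on simple inputs:

print(euclidean_distance([0, 0], [3, 4]))        # 5.0
print(euclidean_distance_no_np([0, 0], [3, 4]))  # 5.0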
| 114 | '''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
__lowerCamelCase = '''ylacombe/bark-small'''
__lowerCamelCase = tempfile.mkdtemp()
__lowerCamelCase = '''en_speaker_1'''
__lowerCamelCase = '''This is a test string'''
__lowerCamelCase = '''speaker_embeddings_path.json'''
__lowerCamelCase = '''speaker_embeddings'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , **a : Dict ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.checkpoint , **a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = BarkProcessor(tokenizer=a )
processor.save_pretrained(self.tmpdirname )
__lowerCamelCase = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
__lowerCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
__lowerCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__lowerCamelCase = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
__lowerCamelCase = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
__lowerCamelCase = 35
__lowerCamelCase = 2
__lowerCamelCase = 8
__lowerCamelCase = {
'''semantic_prompt''': np.ones(a ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
__lowerCamelCase = processor(text=self.input_string , voice_preset=a )
__lowerCamelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a , np.array([] ) ).tolist() )
# test loading voice preset from npz file
__lowerCamelCase = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(a , **a )
__lowerCamelCase = processor(text=self.input_string , voice_preset=a )
__lowerCamelCase = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(a , np.array([] ) ).tolist() )
# test loading voice preset from the hub
__lowerCamelCase = processor(text=self.input_string , voice_preset=self.voice_preset )
def SCREAMING_SNAKE_CASE__ ( self : int ):
"""simple docstring"""
__lowerCamelCase = self.get_tokenizer()
__lowerCamelCase = BarkProcessor(tokenizer=a )
__lowerCamelCase = processor(text=self.input_string )
__lowerCamelCase = tokenizer(
self.input_string , padding='''max_length''' , max_length=2_56 , add_special_tokens=a , return_attention_mask=a , return_token_type_ids=a , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
| 67 | 0 |
from collections.abc import Sequence


def evaluate_poly(poly: Sequence[float], x: float) -> float:
    """Evaluate sum(c_i * x**i) term by term."""
    return sum(c * (x**i) for i, c in enumerate(poly))


def horner(poly: Sequence[float], x: float) -> float:
    """Evaluate the same polynomial with Horner's rule: n multiplications, no explicit powers."""
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result


if __name__ == "__main__":
    poly = (0.0, 0.0, 5.0, 9.3, 7.0)
    x = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
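Worked check with the constants above: 5·10² + 9.3·10³ + 7·10⁴ = 79800, and both strategies return it.

poly, x = (0.0, 0.0, 5.0, 9.3, 7.0), 10.0
print(evaluate_poly(poly, x), horner(poly, x))  # both ~79800.0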
| 343 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase ={"configuration_vit_msn": ["VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP", "ViTMSNConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST",
"ViTMSNModel",
"ViTMSNForImageClassification",
"ViTMSNPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 0 |
import numpy as np


def sigmoid(vector: np.ndarray) -> np.ndarray:
    """Elementwise logistic function 1 / (1 + e^-x)."""
    return 1 / (1 + np.exp(-vector))
if __name__ == "__main__":
import doctest
doctest.testmod()
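Example on a small array:

print(sigmoid(np.array([-1.0, 0.0, 1.0])))  # [0.26894142 0.5 0.73105858]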
| 218 | '''simple docstring'''
import re

from filelock import FileLock


try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)


def add_newline_to_end_of_each_sentence(x: str) -> str:
    x = re.sub('<n>', '', x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x))
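A quick behavioral sketch (assumes nltk's punkt data was downloaded above):

print(add_newline_to_end_of_each_sentence("Hello there. General Kenobi!"))
# Hello there.
# General Kenobi!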
| 67 | 0 |
'''simple docstring'''
def nand_gate(input_1: int, input_2: int) -> int:
    """Return 1 unless both inputs are 1 (logical NAND)."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """simple docstring"""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0


if __name__ == "__main__":
    print(nand_gate(0, 0))
    print(nand_gate(0, 1))
    print(nand_gate(1, 0))
    print(nand_gate(1, 1))
| 28 | '''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
__UpperCAmelCase ={
"abeja/gpt-neox-japanese-2.7b": "https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json",
}
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[int] ="gpt_neox_japanese"
def __init__( self : List[Any] , a : Tuple=3_20_00 , a : Dict=25_60 , a : Union[str, Any]=32 , a : Dict=32 , a : Dict=4 , a : Optional[Any]="gelu" , a : Any=1.00 , a : str=1_00_00 , a : List[str]=20_48 , a : str=0.02 , a : Union[str, Any]=1e-5 , a : Optional[Any]=True , a : str=3_19_96 , a : List[str]=3_19_99 , a : str=0.1 , a : Union[str, Any]=0.0 , **a : Optional[Any] , ):
"""simple docstring"""
super().__init__(bos_token_id=a , eos_token_id=a , **a )
__lowerCamelCase = vocab_size
__lowerCamelCase = max_position_embeddings
__lowerCamelCase = hidden_size
__lowerCamelCase = num_hidden_layers
__lowerCamelCase = num_attention_heads
__lowerCamelCase = intermediate_multiple_size
__lowerCamelCase = hidden_act
__lowerCamelCase = rotary_pct
__lowerCamelCase = rotary_emb_base
__lowerCamelCase = initializer_range
__lowerCamelCase = layer_norm_eps
__lowerCamelCase = use_cache
__lowerCamelCase = attention_dropout
__lowerCamelCase = hidden_dropout
| 67 | 0 |
from collections.abc import Sequence
from queue import Queue
class _lowercase :
def __init__( self : str , snake_case : Any , snake_case : Optional[int] , snake_case : Tuple , snake_case : Union[str, Any]=None , snake_case : int=None ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = start
UpperCamelCase_ : Tuple = end
UpperCamelCase_ : str = val
UpperCamelCase_ : Tuple = (start + end) // 2
UpperCamelCase_ : Union[str, Any] = left
UpperCamelCase_ : Union[str, Any] = right
def __repr__( self : int ) -> str:
"""simple docstring"""
return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"
class _lowercase :
def __init__( self : Dict , snake_case : Sequence , snake_case : Optional[Any] ) -> Dict:
"""simple docstring"""
UpperCamelCase_ : int = collection
UpperCamelCase_ : Tuple = function
if self.collection:
UpperCamelCase_ : Optional[Any] = self._build_tree(0 , len(snake_case ) - 1 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : Union[str, Any] , snake_case : str ) -> List[str]:
"""simple docstring"""
self._update_tree(self.root , snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : List[Any] , snake_case : Tuple ) -> Union[str, Any]:
"""simple docstring"""
return self._query_range(self.root , snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : Tuple , snake_case : str ) -> Union[str, Any]:
"""simple docstring"""
if start == end:
return SegmentTreeNode(snake_case , snake_case , self.collection[start] )
UpperCamelCase_ : Dict = (start + end) // 2
UpperCamelCase_ : Any = self._build_tree(snake_case , snake_case )
UpperCamelCase_ : int = self._build_tree(mid + 1 , snake_case )
return SegmentTreeNode(snake_case , snake_case , self.fn(left.val , right.val ) , snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : str , snake_case : Optional[Any] , snake_case : int ) -> List[str]:
"""simple docstring"""
if node.start == i and node.end == i:
UpperCamelCase_ : List[Any] = val
return
if i <= node.mid:
self._update_tree(node.left , snake_case , snake_case )
else:
self._update_tree(node.right , snake_case , snake_case )
UpperCamelCase_ : Union[str, Any] = self.fn(node.left.val , node.right.val )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , snake_case : Union[str, Any] , snake_case : str , snake_case : str ) -> List[Any]:
"""simple docstring"""
if node.start == i and node.end == j:
return node.val
if i <= node.mid:
if j <= node.mid:
# range in left child tree
return self._query_range(node.left , snake_case , snake_case )
else:
# range in left child tree and right child tree
return self.fn(
self._query_range(node.left , snake_case , node.mid ) , self._query_range(node.right , node.mid + 1 , snake_case ) , )
else:
# range in right child tree
return self._query_range(node.right , snake_case , snake_case )
def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
if self.root is not None:
UpperCamelCase_ : str = Queue()
queue.put(self.root )
while not queue.empty():
UpperCamelCase_ : Tuple = queue.get()
yield node
if node.left is not None:
queue.put(node.left )
if node.right is not None:
queue.put(node.right )
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print('*' * 50)
a_ = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
| 175 | '''simple docstring'''
def add(first: int, second: int) -> int:
    """Add two integers using only bitwise operations."""
    while second != 0:
        c = first & second  # carry bits
        first ^= second  # sum without carry
        second = c << 1  # shift carry into position
    return first


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    first = int(input("Enter the first number: ").strip())
    second = int(input("Enter the second number: ").strip())
    print(f'{add(first, second) = }')
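Tracing the carry loop for 5 + 3 shows why it terminates:

# first=0b101, second=0b011 -> c=0b001, first=0b110, second=0b010
# first=0b110, second=0b010 -> c=0b010, first=0b100, second=0b100
# first=0b100, second=0b100 -> c=0b100, first=0b000, second=0b1000
# first=0b000, second=0b1000 -> c=0,     first=0b1000, second=0
print(add(5, 3))  # 8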
| 67 | 0 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
lowercase_ = XLMTokenizer
lowercase_ = False
def SCREAMING_SNAKE_CASE_ (self : Tuple) ->Union[str, Any]:
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase__: Dict =[
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
lowerCamelCase__: Tuple =dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_))))
lowerCamelCase__: int =["l o 123", "lo w 1456", "e r</w> 1789", ""]
lowerCamelCase__: str =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"])
lowerCamelCase__: int =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , "w") as fp:
fp.write(json.dumps(UpperCAmelCase_))
with open(self.merges_file , "w") as fp:
fp.write("\n".join(UpperCAmelCase_))
def SCREAMING_SNAKE_CASE_ (self : Optional[int] , UpperCAmelCase_ : Any) ->Union[str, Any]:
'''simple docstring'''
lowerCamelCase__: List[str] ="lower newer"
lowerCamelCase__: str ="lower newer"
return input_text, output_text
def SCREAMING_SNAKE_CASE_ (self : Optional[int]) ->str:
'''simple docstring'''
lowerCamelCase__: Tuple =XLMTokenizer(self.vocab_file , self.merges_file)
lowerCamelCase__: Tuple ="lower"
lowerCamelCase__: Dict =["low", "er</w>"]
lowerCamelCase__: Optional[Any] =tokenizer.tokenize(UpperCAmelCase_)
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: str =tokens + ["<unk>"]
lowerCamelCase__: Optional[Any] =[14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCAmelCase_) , UpperCAmelCase_)
@slow
def SCREAMING_SNAKE_CASE_ (self : List[Any]) ->List[str]:
'''simple docstring'''
lowerCamelCase__: Union[str, Any] =XLMTokenizer.from_pretrained("xlm-mlm-en-2048")
lowerCamelCase__: List[Any] =tokenizer.encode("sequence builders" , add_special_tokens=UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =tokenizer.encode("multi-sequence build" , add_special_tokens=UpperCAmelCase_)
lowerCamelCase__: Optional[int] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_)
lowerCamelCase__: Union[str, Any] =tokenizer.build_inputs_with_special_tokens(UpperCAmelCase_ , UpperCAmelCase_)
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1]
| 10 | '''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase ={
"configuration_time_series_transformer": [
"TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TimeSeriesTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase =[
"TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimeSeriesTransformerForPrediction",
"TimeSeriesTransformerModel",
"TimeSeriesTransformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 67 | 0 |
import itertools
import math


def is_prime(number: int) -> bool:
    """Trial division using the 6k +/- 1 optimization."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def prime_generator():
    """Yield primes in ascending order."""
    num = 2
    while True:
        if is_prime(num):
            yield num
        num += 1


def solution(nth: int = 1_00_01) -> int:
    """Return the nth prime number."""
    return next(itertools.islice(prime_generator(), nth - 1, None))
if __name__ == "__main__":
print(f"{solution() = }")
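Sanity checks against the first few primes:

print(solution(1))  # 2
print(solution(6))  # 13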
| 322 | '''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
__UpperCAmelCase =logging.get_logger(__name__)
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> None:
__lowerCamelCase = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ ), f"""{len(UpperCamelCase__ )} != {len(UpperCamelCase__ )}"""
dest_layers.load_state_dict(layers_to_copy.state_dict() )
__UpperCAmelCase ={
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
__UpperCAmelCase ={
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
try:
__lowerCamelCase = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f"""no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first"""
f""" {n_student}""" )
return list(range(UpperCamelCase__ ) )
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> List[int]:
if n_student > n_teacher:
raise ValueError(f"""Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}""" )
elif n_teacher == n_student:
return list(range(UpperCamelCase__ ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = "student" , UpperCamelCase__ = None , UpperCamelCase__ = None , UpperCamelCase__=False , UpperCamelCase__=None , UpperCamelCase__=None , **UpperCamelCase__ , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
__lowerCamelCase = '''encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher.'''
assert (e is not None) or (d is not None), _msg
if isinstance(UpperCamelCase__ , UpperCamelCase__ ):
AutoTokenizer.from_pretrained(UpperCamelCase__ ).save_pretrained(UpperCamelCase__ ) # purely for convenience
__lowerCamelCase = AutoModelForSeqaSeqLM.from_pretrained(UpperCamelCase__ ).eval()
else:
assert isinstance(UpperCamelCase__ , UpperCamelCase__ ), f"""teacher must be a model or string got type {type(UpperCamelCase__ )}"""
__lowerCamelCase = teacher.config.to_diff_dict()
try:
__lowerCamelCase , __lowerCamelCase = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__lowerCamelCase = teacher_e
if d is None:
__lowerCamelCase = teacher_d
init_kwargs.update({'''encoder_layers''': e, '''decoder_layers''': d} )
except AttributeError: # T5
if hasattr(teacher.config , '''num_encoder_layers''' ):
__lowerCamelCase , __lowerCamelCase = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__lowerCamelCase , __lowerCamelCase = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__lowerCamelCase = teacher_e
if d is None:
__lowerCamelCase = teacher_d
if hasattr(teacher.config , '''num_encoder_layers''' ):
init_kwargs.update({'''num_encoder_layers''': e, '''num_decoder_layers''': d} )
else:
init_kwargs.update({'''num_layers''': e, '''num_decoder_layers''': d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(UpperCamelCase__ )
# Copy weights
__lowerCamelCase = teacher.config_class(**UpperCamelCase__ )
__lowerCamelCase = AutoModelForSeqaSeqLM.from_config(UpperCamelCase__ )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__lowerCamelCase = student.load_state_dict(teacher.state_dict() , strict=UpperCamelCase__ )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__lowerCamelCase , __lowerCamelCase = list(range(UpperCamelCase__ ) ), list(range(UpperCamelCase__ ) )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"""
f""" {save_path}""" )
student.save_pretrained(UpperCamelCase__ )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__lowerCamelCase = pick_layers_to_copy(UpperCamelCase__ , UpperCamelCase__ )
if d_layers_to_copy is None:
__lowerCamelCase = pick_layers_to_copy(UpperCamelCase__ , UpperCamelCase__ )
try:
if hasattr(
UpperCamelCase__ , '''prophetnet''' ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , UpperCamelCase__ )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , UpperCamelCase__ )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , UpperCamelCase__ )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , UpperCamelCase__ )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , UpperCamelCase__ )
copy_layers(teacher.decoder.block , student.decoder.block , UpperCamelCase__ )
logger.info(
f"""Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}""" )
__lowerCamelCase = {
'''teacher_type''': teacher.config.model_type,
'''copied_encoder_layers''': e_layers_to_copy,
'''copied_decoder_layers''': d_layers_to_copy,
}
student.save_pretrained(UpperCamelCase__ )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
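A hedged invocation sketch (the teacher id and save path are illustrative, and the e/d keyword names are assumed from the variables used inside the function): copy a 12-layer BART teacher down to a 3-encoder / 3-decoder student.

student, e_copied, d_copied = create_student_by_copying_alternating_layers(
    "facebook/bart-large-cnn", save_path="student_3_3", e=3, d=3
)
print(e_copied)  # [0, 6, 11], per the 12 -> 3 entry of the LAYERS_TO_COPY table above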
| 67 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCamelCase__ = {
'''configuration_vivit''': ['''VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''VivitConfig'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = ['''VivitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase__ = [
'''VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''VivitModel''',
'''VivitPreTrainedModel''',
'''VivitForVideoClassification''',
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
UpperCamelCase__ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 181 | '''simple docstring'''
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
__UpperCAmelCase =["gpt2"]
__UpperCAmelCase ="gpt2"
if is_tf_available():
class a__ ( tf.Module ):
def __init__( self : str , a : Union[str, Any] ):
"""simple docstring"""
super().__init__()
__lowerCamelCase = tokenizer
__lowerCamelCase = AutoConfig.from_pretrained(a )
__lowerCamelCase = TFGPTaLMHeadModel.from_config(a )
@tf.function(input_signature=(tf.TensorSpec((None,) , tf.string , name='''text''' ),) )
def SCREAMING_SNAKE_CASE__ ( self : str , a : Tuple ):
"""simple docstring"""
__lowerCamelCase = self.tokenizer(a )
__lowerCamelCase = tokenized['''input_ids'''].to_tensor()
__lowerCamelCase = tf.cast(input_ids_dense > 0 , tf.intaa )
# input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
__lowerCamelCase = self.model(input_ids=a , attention_mask=a )['''logits''']
return outputs
@require_tf
@require_keras_nlp
class a__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
super().setUp()
__lowerCamelCase = [GPTaTokenizer.from_pretrained(a ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
__lowerCamelCase = [TFGPTaTokenizer.from_pretrained(a ) for checkpoint in TOKENIZER_CHECKPOINTS]
assert len(self.tokenizers ) == len(self.tf_tokenizers )
__lowerCamelCase = [
'''This is a straightforward English test sentence.''',
'''This one has some weird characters\rto\nsee\r\nif those\u00E9break things.''',
'''Now we\'re going to add some Chinese: 一 二 三 一二三''',
'''And some much more rare Chinese: 齉 堃 齉堃''',
'''Je vais aussi écrire en français pour tester les accents''',
'''Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ''',
]
__lowerCamelCase = list(zip(self.test_sentences , self.test_sentences[::-1] ) )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
for tokenizer, tf_tokenizer in zip(self.tokenizers , self.tf_tokenizers ):
for test_inputs in self.test_sentences:
__lowerCamelCase = tokenizer([test_inputs] , return_tensors='''tf''' )
__lowerCamelCase = tf_tokenizer([test_inputs] )
for key in python_outputs.keys():
# convert them to numpy to avoid messing with ragged tensors
__lowerCamelCase = python_outputs[key].numpy()
__lowerCamelCase = tf_outputs[key].numpy()
self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
self.assertTrue(tf.reduce_all(tf.cast(a , tf.intaa ) == tf_outputs_values ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase = tf.function(a )
for test_inputs in self.test_sentences:
__lowerCamelCase = tf.constant(a )
__lowerCamelCase = compiled_tokenizer(a )
__lowerCamelCase = tf_tokenizer(a )
for key in eager_outputs.keys():
self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase = ModelToSave(tokenizer=a )
__lowerCamelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCamelCase = model.serving(a ) # Build model with some sample inputs
with TemporaryDirectory() as tempdir:
__lowerCamelCase = Path(a ) / '''saved.model'''
tf.saved_model.save(a , a , signatures={'''serving_default''': model.serving} )
__lowerCamelCase = tf.saved_model.load(a )
__lowerCamelCase = loaded_model.signatures['''serving_default'''](a )['''output_0''']
# We may see small differences because the loaded model is compiled, so we need an epsilon for the test
self.assertTrue(tf.reduce_all(out == loaded_output ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
__lowerCamelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCamelCase = tf_tokenizer(a ) # Build model with some sample inputs
__lowerCamelCase = tf_tokenizer.get_config()
__lowerCamelCase = TFGPTaTokenizer.from_config(a )
__lowerCamelCase = model_from_config(a )
for key in from_config_output.keys():
self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )
@slow
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
for tf_tokenizer in self.tf_tokenizers:
# for the test to run
__lowerCamelCase = 12_31_23
for max_length in [3, 5, 10_24]:
__lowerCamelCase = tf.convert_to_tensor([self.test_sentences[0]] )
__lowerCamelCase = tf_tokenizer(a , max_length=a )
__lowerCamelCase = out['''input_ids'''].numpy().shape[1]
assert out_length == max_length
| 67 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
a_ : List[str] = logging.getLogger(__name__)
def a_ ( __snake_case : Dict , __snake_case : Dict ) -> Dict:
"""simple docstring"""
return (preds == labels).mean()
@dataclass
class __UpperCamelCase :
lowercase : str =field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowercase : Optional[str] =field(
default=UpperCAmelCase__ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowercase : Optional[str] =field(
default=UpperCAmelCase__ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowercase : Optional[str] =field(
default=UpperCAmelCase__ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
@dataclass
class __UpperCamelCase :
lowercase : str =field(metadata={'help': 'The name of the task to train on: ' + ', '.join(processors.keys() )} )
lowercase : str =field(metadata={'help': 'Should contain the data files for the task.'} )
lowercase : int =field(
default=1_28 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
lowercase : bool =field(
default=UpperCAmelCase__ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
def a_ ( ) -> List[Any]:
"""simple docstring"""
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
lowerCamelCase_ =HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
lowerCamelCase_, lowerCamelCase_, lowerCamelCase_ =parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
'''Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s''' , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''' , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
try:
lowerCamelCase_ =processors[data_args.task_name]()
lowerCamelCase_ =processor.get_labels()
lowerCamelCase_ =len(UpperCamelCase__ )
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCamelCase_ =AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase__ , finetuning_task=data_args.task_name , cache_dir=model_args.cache_dir , )
lowerCamelCase_ =AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
lowerCamelCase_ =AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool('''.ckpt''' in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
# Get datasets
lowerCamelCase_ =(
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCamelCase__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowerCamelCase_ =(
MultipleChoiceDataset(
data_dir=data_args.data_dir , tokenizer=UpperCamelCase__ , task=data_args.task_name , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def compute_metrics(__snake_case : Any ) -> Dict:
lowerCamelCase_ =np.argmax(p.predictions , axis=1 )
return {"acc": simple_accuracy(UpperCamelCase__ , p.label_ids )}
# Data collator
lowerCamelCase_ =DataCollatorWithPadding(UpperCamelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowerCamelCase_ =Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , data_collator=UpperCamelCase__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCamelCase_ ={}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
lowerCamelCase_ =trainer.evaluate()
lowerCamelCase_ =os.path.join(training_args.output_dir , '''eval_results.txt''' )
if trainer.is_world_master():
with open(UpperCamelCase__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(''' %s = %s''' , UpperCamelCase__ , UpperCamelCase__ )
writer.write('''%s = %s\n''' % (key, value) )
results.update(UpperCamelCase__ )
return results
def a_ ( __snake_case : Tuple ) -> List[Any]:
"""simple docstring"""
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 75 | '''simple docstring'''
def actual_power(a: int, b: int) -> int:
    """Exponentiation by squaring; int() truncates toward zero, so negative b also terminates."""
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))


def power(a: int, b: int) -> float:
    if b < 0:
        # actual_power(a, b) equals a**abs(b) for negative b, thanks to the truncation above
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
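Worked check of both branches:

print(power(2, 3))   # 8
print(power(2, -3))  # 0.125, via the reciprocal branch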
| 67 | 0 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 182 | '''simple docstring'''
import logging
import os
from .state import PartialState
class a__ ( logging.LoggerAdapter ):
@staticmethod
def SCREAMING_SNAKE_CASE__ ( a : Optional[Any] ):
"""simple docstring"""
__lowerCamelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def SCREAMING_SNAKE_CASE__ ( self : int , a : Optional[int] , a : str , *a : Optional[int] , **a : List[Any] ):
"""simple docstring"""
if PartialState._shared_state == {}:
raise RuntimeError(
'''You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.''' )
__lowerCamelCase = kwargs.pop('''main_process_only''' , a )
__lowerCamelCase = kwargs.pop('''in_order''' , a )
if self.isEnabledFor(a ):
if self._should_log(a ):
__lowerCamelCase , __lowerCamelCase = self.process(a , a )
self.logger.log(a , a , *a , **a )
elif in_order:
__lowerCamelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
__lowerCamelCase , __lowerCamelCase = self.process(a , a )
self.logger.log(a , a , *a , **a )
state.wait_for_everyone()
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ = None ) -> Optional[int]:
if log_level is None:
__lowerCamelCase = os.environ.get('''ACCELERATE_LOG_LEVEL''' , UpperCamelCase__ )
__lowerCamelCase = logging.getLogger(UpperCamelCase__ )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(UpperCamelCase__ , {} )
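A usage sketch of the adapter above, via accelerate's public get_logger:

from accelerate.logging import get_logger

logger = get_logger(__name__, log_level="INFO")
logger.info("printed once, from the main process only")
logger.info("printed by every process", main_process_only=False)
logger.info("printed by each process in rank order", main_process_only=False, in_order=True)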
| 67 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 155 | '''simple docstring'''
def is_balanced(s: str) -> bool:
    """Return True if every bracket in s is closed in the right order."""
    stack = []
    open_brackets = set({'(', '[', '{'})
    closed_brackets = set({')', ']', '}'})
    open_to_closed = {'{': '}', '[': ']', '(': ')'}

    for i in range(len(s)):
        if s[i] in open_brackets:
            stack.append(s[i])
        elif s[i] in closed_brackets and (
            len(stack) == 0 or (len(stack) > 0 and open_to_closed[stack.pop()] != s[i])
        ):
            return False

    return len(stack) == 0


def main() -> None:
    s = input('Enter sequence of brackets: ')
    if is_balanced(s):
        print(s, 'is balanced')
    else:
        print(s, 'is not balanced')
if __name__ == "__main__":
main()
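Examples:

print(is_balanced("([]{})"))  # True
print(is_balanced("[(])"))    # False, ']' closes '(' out of order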
| 67 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : Optional[Any] = {
"configuration_squeezebert": [
"SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SqueezeBertConfig",
"SqueezeBertOnnxConfig",
],
"tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[Any] = [
"SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"SqueezeBertForMaskedLM",
"SqueezeBertForMultipleChoice",
"SqueezeBertForQuestionAnswering",
"SqueezeBertForSequenceClassification",
"SqueezeBertForTokenClassification",
"SqueezeBertModel",
"SqueezeBertModule",
"SqueezeBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
a : Tuple = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 114 | '''simple docstring'''
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Dict ="M-CLIP"
def __init__( self : Tuple , a : Optional[int]=10_24 , a : Tuple=7_68 , **a : List[str] ):
"""simple docstring"""
__lowerCamelCase = transformerDimSize
__lowerCamelCase = imageDimSize
super().__init__(**a )
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[Any] =MCLIPConfig
def __init__( self : str , a : List[Any] , *a : Dict , **a : str ):
"""simple docstring"""
super().__init__(a , *a , **a )
__lowerCamelCase = XLMRobertaModel(a )
__lowerCamelCase = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , a : int , a : List[Any] ):
"""simple docstring"""
__lowerCamelCase = self.transformer(input_ids=a , attention_mask=a )[0]
__lowerCamelCase = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(a ), embs
| 67 | 0 |