# XXX: we want transformers master here - in the absence of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
git_repo_path = Path(__file__).resolve().parents[3] / "src"
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)
models = {
    "base": "patrickvonplaten/wav2vec2_tiny_random",
    "robust": "patrickvonplaten/wav2vec2_tiny_random_robust",
}

ZERO2 = "zero2"
ZERO3 = "zero3"
stages = [ZERO2, ZERO3]


def parameterized_custom_name_func(func, param_num, param):
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("_".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"


# Cartesian-product of zero stages with models to test
params = list(itertools.product(stages, models.keys()))
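# With the definitions above, `params` expands to four sub-tests:
# [("zero2", "base"), ("zero2", "robust"), ("zero3", "base"), ("zero3", "robust")]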
@slow
@require_deepspeed
@require_torch_gpu
class TestDeepSpeedWav2Vec2(TestCasePlus):
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=False)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp32_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=False)

    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_non_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=False, fp16=True)

    @require_torch_multi_gpu
    @parameterized.expand(params, name_func=parameterized_custom_name_func)
    def test_fp16_distributed(self, stage, model):
        self.run_and_check(stage=stage, model=model, distributed=True, fp16=True)
    def do_checks(self, output_dir):
        # XXX: run_asr is premature and doesn't save any results, so for now we only
        # check that the training process completed without failing
        pass
    def run_and_check(
        self,
        stage: str,
        model: str,
        eval_steps: int = 10,
        distributed: bool = True,
        quality_checks: bool = True,
        fp16: bool = True,
    ):
        model_name = models[model]
        output_dir = self.run_trainer(
            stage=stage,
            model_name=model_name,
            eval_steps=eval_steps,
            num_train_epochs=1,
            distributed=distributed,
            fp16=fp16,
        )
        self.do_checks(output_dir)
        return output_dir
    def run_trainer(
        self,
        stage: str,
        model_name: str,
        eval_steps: int = 10,
        num_train_epochs: int = 1,
        distributed: bool = True,
        fp16: bool = True,
    ):
        output_dir = self.get_auto_remove_tmp_dir("./xxx", after=False)
        args = f"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
            --num_train_epochs {str(num_train_epochs)}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
""".split()
        if fp16:
            args.extend(["--fp16"])

        # currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
        # hence the separate config files
        ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
        script = [f"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
        launcher = self.get_launcher(distributed)

        cmd = launcher + script + args + ds_args
        # keep for quick debug
        # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] + cmd)); die
        execute_subprocess_async(cmd, env=self.get_env())
return output_dir
    def get_launcher(self, distributed=False):
        # cap at 2 GPUs: a single process is enough for the non-distributed runs
        num_gpus = min(2, get_gpu_count()) if distributed else 1
        return f"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class BenchmarkTest(unittest.TestCase):
    def check_results_dict_not_empty(self, results):
        for model_result in results.values():
            for batch_size, sequence_length in zip(model_result["bs"], model_result["ss"]):
                result = model_result["result"][batch_size][sequence_length]
                self.assertIsNotNone(result)
    def test_inference_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_no_configs_only_pretrain(self):
        MODEL_ID = "sgugger/tiny-distilbert-classification"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
            only_pretrain_model=True,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_torchscript(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            torchscript=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_inference_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            fp16=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)
    def test_inference_no_model_no_architectures(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        # set architectures equal to `None`
        config.architectures = None
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_no_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    @unittest.skipIf(torch_device == "cpu", "Can't do half precision")
    def test_train_no_configs_fp16(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            fp16=True,
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args)
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_inference_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_inference_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=False,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_inference_result)
        self.check_results_dict_not_empty(results.memory_inference_result)

    def test_train_with_configs(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=False,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)

    def test_train_encoder_decoder_with_configs(self):
        MODEL_ID = "sshleifer/tinier_bart"
        config = AutoConfig.from_pretrained(MODEL_ID)
        benchmark_args = PyTorchBenchmarkArguments(
            models=[MODEL_ID],
            training=True,
            inference=True,
            sequence_lengths=[8],
            batch_sizes=[1],
            multi_process=False,
        )
        benchmark = PyTorchBenchmark(benchmark_args, configs=[config])
        results = benchmark.run()
        self.check_results_dict_not_empty(results.time_train_result)
        self.check_results_dict_not_empty(results.memory_train_result)
    def test_save_csv_files(self):
        MODEL_ID = "sshleifer/tiny-gpt2"
        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                save_to_csv=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                inference_time_csv_file=os.path.join(tmp_dir, "inf_time.csv"),
                train_memory_csv_file=os.path.join(tmp_dir, "train_mem.csv"),
                inference_memory_csv_file=os.path.join(tmp_dir, "inf_mem.csv"),
                train_time_csv_file=os.path.join(tmp_dir, "train_time.csv"),
                env_info_csv_file=os.path.join(tmp_dir, "env.csv"),
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            benchmark.run()
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_time.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "inf_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "train_mem.csv")).exists())
            self.assertTrue(Path(os.path.join(tmp_dir, "env.csv")).exists())
    def test_trace_memory_line_by_line(self):
        MODEL_ID = "sshleifer/tiny-gpt2"

        def _check_summary_is_not_empty(summary):
            self.assertTrue(hasattr(summary, "sequential"))
            self.assertTrue(hasattr(summary, "cumulative"))
            self.assertTrue(hasattr(summary, "current"))
            self.assertTrue(hasattr(summary, "total"))

        with tempfile.TemporaryDirectory() as tmp_dir:
            benchmark_args = PyTorchBenchmarkArguments(
                models=[MODEL_ID],
                training=True,
                inference=True,
                sequence_lengths=[8],
                batch_sizes=[1],
                log_filename=os.path.join(tmp_dir, "log.txt"),
                log_print=True,
                trace_memory_line_by_line=True,
                multi_process=False,
            )
            benchmark = PyTorchBenchmark(benchmark_args)
            result = benchmark.run()
            _check_summary_is_not_empty(result.inference_summary)
            _check_summary_is_not_empty(result.train_summary)
            self.assertTrue(Path(os.path.join(tmp_dir, "log.txt")).exists())
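# Standalone usage sketch (illustrative, mirrors what the tests above exercise):
#   args = PyTorchBenchmarkArguments(models=["sshleifer/tiny-gpt2"], inference=True,
#                                    training=False, sequence_lengths=[8], batch_sizes=[1])
#   results = PyTorchBenchmark(args).run()
#   # results.time_inference_result / results.memory_inference_result are dicts keyed by
#   # model name, then indexed by batch size ("bs") and sequence length ("ss").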
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right

EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_full_tokenizer(self):
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_save_pretrained(self):
        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-nllb", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it save with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()
                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for"
" Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons"
" will only worsen the violence and misery for millions of people.",
]
                tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
" Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
" că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)

                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)
    @unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
    def test_save_slow_from_fast_and_reload_fast(self):
        pass
    def test_special_tokens_initialization(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                added_tokens = [AddedToken("<special>", lstrip=True)]

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, additional_special_tokens=added_tokens, **kwargs
                )
                r_output = tokenizer_r.encode("Hey this is a <special> token")

                special_token_id = tokenizer_r.encode("<special>", add_special_tokens=False)[0]

                self.assertTrue(special_token_id in r_output)

                if self.test_slow_tokenizer:
                    tokenizer_cr = self.rust_tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs,
                    )
                    tokenizer_p = self.tokenizer_class.from_pretrained(
                        pretrained_name, additional_special_tokens=added_tokens, **kwargs
                    )

                    p_output = tokenizer_p.encode("Hey this is a <special> token")
                    cr_output = tokenizer_cr.encode("Hey this is a <special> token")

                    self.assertEqual(p_output, r_output)
                    self.assertEqual(cr_output, r_output)
                    self.assertTrue(special_token_id in p_output)
                    self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
" UN Chief Says There Is No Military Solution in Syria",
" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
]
    tgt_text = [
"Şeful ONU declară că nu există o soluţie militară în Siria",
"Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
]
    expected_src_tokens = [
        256047,
        16297,
        134408,
        8165,
        248066,
        14734,
        950,
        1135,
        105721,
        3573,
        83,
        27352,
        108,
        49486,
        2,
    ]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"], 256001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"], 256002)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"], 256057)

    def test_enro_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_enro_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        # fmt: off
        generated_ids = [RO_CODE, 4254, 98068, 112923, 39072, 3909, 713, 102767, 26, 17314, 35642, 14683, 33118, 2022, 66987, 2, 256047]
        # fmt: on
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_enro_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[-1], 2)
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(len(ids), desired_max_length)

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [256203, 3])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = NllbTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_enro_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(
            batch["labels"], self.tokenizer.pad_token_id, self.tokenizer.lang_code_to_id["ron_Latn"]
        )

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 15), batch.input_ids.shape)
        self.assertEqual((2, 15), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(RO_CODE, batch.decoder_input_ids[0, 0])  # the lang code is the decoder start token
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(
            labels,
            self.tokenizer.pad_token_id,
            decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang],
        )

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # eng_Latn, A, test, EOS
                "input_ids": [[256047, 70, 7356, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # fra_Latn
                "forced_bos_token_id": 256057,
            },
        )
    @require_torch
    def test_legacy_behaviour(self):
        self.tokenizer.legacy_behaviour = True
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2, 256047]
        )

        self.tokenizer.legacy_behaviour = False
        inputs = self.tokenizer(
            "UN Chief says there is no military solution in Syria", src_lang="eng_Latn", tgt_lang="fra_Latn"
        )
        self.assertEqual(
            inputs.input_ids, [256047, 16297, 134408, 25653, 6370, 248, 254, 103929, 94995, 108, 49486, 2]
        )
from __future__ import annotations
from typing import Any
class Matrix:
    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        self.row, self.column = row, column
        self.array = [[default_value for _ in range(column)] for _ in range(row)]

    def __str__(self) -> str:
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector: list[float]) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s
    def __repr__(self) -> str:
        return str(self)

    def validate_indices(self, loc: tuple[int, int]) -> bool:
        """Check whether the given location is a valid (row, column) pair."""
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        elif not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        else:
            return True

    def __getitem__(self, loc: tuple[int, int]) -> Any:
        assert self.validate_indices(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc: tuple[int, int], value: float) -> None:
        assert self.validate_indices(loc)
        self.array[loc[0]][loc[1]] = value
    def __add__(self, another: Matrix) -> Matrix:
        # Validate
        assert isinstance(another, Matrix)
        assert self.row == another.row and self.column == another.column

        # Add
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self) -> Matrix:
        result = Matrix(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another: Matrix) -> Matrix:
        return self + (-another)
    def __mul__(self, another: int | float | Matrix) -> Matrix:
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = Matrix(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, Matrix):  # Matrix multiplication
            assert self.column == another.row
            result = Matrix(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            msg = f"Unsupported type given for another ({type(another)})"
            raise TypeError(msg)
    def transpose(self) -> Matrix:
        result = Matrix(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u: Matrix, v: Matrix) -> Any:
        # Validate
        assert isinstance(u, Matrix) and isinstance(v, Matrix)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertible
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))
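# Note on Matrix.sherman_morrison: `self` is assumed to already hold A^(-1); the method
# applies the Sherman-Morrison identity
#   (A + u v^T)^(-1) = A^(-1) - (A^(-1) u v^T A^(-1)) / (1 + v^T A^(-1) u),
# so the rank-1 update costs only a few matrix-vector products instead of a fresh inversion.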
# Testing
if __name__ == "__main__":

    def test1() -> None:
        # a^(-1)
        ainv = Matrix(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = Matrix(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = Matrix(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        import doctest

        doctest.testmod()

    test1()
def hex_to_bin(hex_num: str) -> int:
    """
    Convert a hexadecimal string to its binary equivalent (illustrative doctests added).

    >>> hex_to_bin("AC")
    10101100
    >>> hex_to_bin("-aC")
    -10101100
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 151 |
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from __future__ import annotations
from collections import deque
from collections.abc import Iterator
from dataclasses import dataclass
@dataclass
class Edge:
    """Weighted directed graph edge."""

    destination_vertex: int
    weight: int


class AdjacencyList:
    """Graph adjacency list."""

    def __init__(self, size: int):
        self._graph: list[list[Edge]] = [[] for _ in range(size)]
        self._size = size

    def __getitem__(self, vertex: int) -> Iterator[Edge]:
        """Get all the vertices adjacent to the given one."""
        return iter(self._graph[vertex])

    @property
    def size(self) -> int:
        return self._size

    def add_edge(self, from_vertex: int, to_vertex: int, weight: int) -> None:
        if weight not in (0, 1):
            raise ValueError("Edge weight must be either 0 or 1.")
        if to_vertex < 0 or to_vertex >= self.size:
            raise ValueError("Vertex indexes must be in [0; size).")
        self._graph[from_vertex].append(Edge(to_vertex, weight))
    def get_shortest_path(self, start_vertex: int, finish_vertex: int) -> int:
        """
        Return the shortest distance from start_vertex to finish_vertex in a 0-1 graph
        (0-1 BFS: 0-weight edges go to the front of the deque, 1-weight edges to the back).
        """
        queue = deque([start_vertex])
        distances: list[int | None] = [None] * self.size
        distances[start_vertex] = 0

        while queue:
            current_vertex = queue.popleft()
            current_distance = distances[current_vertex]
            if current_distance is None:
                continue

            for edge in self[current_vertex]:
                new_distance = current_distance + edge.weight
                dest_vertex_distance = distances[edge.destination_vertex]
                if isinstance(dest_vertex_distance, int) and new_distance >= dest_vertex_distance:
                    continue
                distances[edge.destination_vertex] = new_distance
                if edge.weight == 0:
                    queue.appendleft(edge.destination_vertex)
                else:
                    queue.append(edge.destination_vertex)

        if distances[finish_vertex] is None:
            raise ValueError("No path from start_vertex to finish_vertex.")

        return distances[finish_vertex]
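def _example_usage() -> None:
    """
    Small illustrative doctest (added for this copy; not part of the original module).
    It is picked up by the doctest.testmod() call below.

    >>> g = AdjacencyList(4)
    >>> g.add_edge(0, 1, 0)
    >>> g.add_edge(1, 2, 1)
    >>> g.add_edge(0, 2, 1)
    >>> g.add_edge(2, 3, 0)
    >>> g.get_shortest_path(0, 3)
    1
    """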
if __name__ == "__main__":
import doctest
doctest.testmod()
| 196 |
def hex_to_bin(hex_num: str) -> int:
    """
    Convert a hexadecimal string to its binary equivalent (illustrative doctests added).

    >>> hex_to_bin("AC")
    10101100
    >>> hex_to_bin("-aC")
    -10101100
    """
    hex_num = hex_num.strip()
    if not hex_num:
        raise ValueError("No value was passed to the function")

    is_negative = hex_num[0] == "-"
    if is_negative:
        hex_num = hex_num[1:]

    try:
        int_num = int(hex_num, 16)
    except ValueError:
        raise ValueError("Invalid value was passed to the function")

    bin_str = ""
    while int_num > 0:
        bin_str = str(int_num % 2) + bin_str
        int_num >>= 1

    return int(("-" + bin_str) if is_negative else bin_str)
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 0 |
import warnings

from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor


logger = logging.get_logger(__name__)


class ChineseCLIPFeatureExtractor(ChineseCLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ChineseCLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
logger = logging.get_logger(__name__)

LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class LongformerConfig(PretrainedConfig):
    model_type = "longformer"

    def __init__(
        self,
        attention_window: Union[List[int], int] = 512,
        sep_token_id: int = 2,
        pad_token_id: int = 1,
        bos_token_id: int = 0,
        eos_token_id: int = 2,
        vocab_size: int = 30_522,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3_072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 512,
        type_vocab_size: int = 2,
        initializer_range: float = 0.02,
        layer_norm_eps: float = 1e-12,
        onnx_export: bool = False,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.attention_window = attention_window
        self.sep_token_id = sep_token_id
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.onnx_export = onnx_export
class LongformerOnnxConfig(OnnxConfig):
    def __init__(self, config: "PretrainedConfig", task: str = "default", patching_specs: "List[PatchingSpec]" = None):
        super().__init__(config, task, patching_specs)
        config.onnx_export = True

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("global_attention_mask", dynamic_axis),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        outputs = super().outputs
        if self.task == "default":
            outputs["pooler_output"] = {0: "batch"}
        return outputs

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return max(super().default_onnx_opset, 14)

    def generate_dummy_inputs(
        self,
        preprocessor: "PreTrainedTokenizerBase",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        inputs = super().generate_dummy_inputs(
            preprocessor=preprocessor, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )
        import torch

        # for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
        # makes the export fail randomly
        inputs["global_attention_mask"] = torch.zeros_like(inputs["input_ids"])
        # make every second token global
        inputs["global_attention_mask"][:, ::2] = 1

        return inputs
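# Usage sketch (assumption, not part of the original file): the config plugs into the
# generic ONNX export API, e.g.
#   from pathlib import Path
#   from transformers.onnx import export
#   onnx_config = LongformerOnnxConfig(model.config)
#   export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("model.onnx"))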
DIGITS_SQUARED = [sum(int(c, 10) ** 2 for c in str(i)) for i in range(100_000)]


def next_number(number: int) -> int:
    """Return the sum of the squares of the digits of ``number``."""
    sum_of_digits_squared = 0
    while number:
        # Increased Speed Slightly by checking every 5 digits together.
        sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
        number //= 100_000

    return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
CHAINS: list[bool | None] = [None] * 10_000_000
CHAINS[0] = True  # 1's chain ends at 1
CHAINS[57] = False  # 58's chain ends at 89
def chain(number: int) -> bool:
    """
    Return True when ``number``'s chain ends at 1 and False when it ends at 89,
    memoizing results (a number and its multiples of 10 share the same digit squares).
    """
    if CHAINS[number - 1] is not None:
        return CHAINS[number - 1]  # type: ignore

    number_chain = chain(next_number(number))
    CHAINS[number - 1] = number_chain

    while number < 10_000_000:
        CHAINS[number - 1] = number_chain
        number *= 10

    return number_chain


def solution(number: int = 10_000_000) -> int:
    """Return how many starting numbers below ``number`` arrive at 89."""
    for i in range(1, number):
        if CHAINS[i] is None:
            chain(i + 1)

    return CHAINS[:number].count(False)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
| 300 |
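# With the default limit of 10_000_000 this should print `solution() = 8581146`
# (the Project Euler 92 answer; stated from memory, worth re-running to confirm).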
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    r"""Wrapper that runs several `ControlNetModel`s and sums their residuals."""

    def __init__(self, controlnets: Union[List[ControlNetModel], Tuple[ControlNetModel]]):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample: torch.FloatTensor,
        timestep: Union[torch.Tensor, float, int],
        encoder_hidden_states: torch.Tensor,
        controlnet_cond: List[torch.tensor],
        conditioning_scale: List[float],
        class_labels: Optional[torch.Tensor] = None,
        timestep_cond: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guess_mode: bool = False,
        return_dict: bool = True,
    ) -> Union[ControlNetOutput, Tuple]:
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )

            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample

        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(
        self,
        save_directory: Union[str, os.PathLike],
        is_main_process: bool = True,
        save_function: Callable = None,
        safe_serialization: bool = False,
        variant: Optional[str] = None,
    ):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )

            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path: Optional[Union[str, os.PathLike]], **kwargs):
        idx = 0
        controlnets = []

        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)

            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"

        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")

        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. Expected at least {pretrained_model_path + '_0'}."
            )

        return cls(controlnets)
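# Usage sketch (assumption, not part of the original file): separately trained nets can be
# combined and passed wherever a single ControlNet is accepted, e.g.
#   multi = MultiControlNetModel([controlnet_canny, controlnet_openpose])
#   pipe = StableDiffusionControlNetPipeline.from_pretrained(base_model, controlnet=multi, ...)
# with the conditioning images and per-net `conditioning_scale` values supplied as lists.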
"""simple docstring"""
from collections.abc import Sequence
def _snake_case ( lowercase__ , lowercase__ ):
return sum(c * (x**i) for i, c in enumerate(__lowercase ) )
def _snake_case ( lowercase__ , lowercase__ ):
_lowerCamelCase : Tuple = 0.0
for coeff in reversed(__lowercase ):
_lowerCamelCase : Union[str, Any] = result * x + coeff
return result
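# Horner's rule evaluates a0 + a1*x + ... + an*x^n as a0 + x*(a1 + x*(a2 + ...)),
# needing only n multiplications, versus the repeated exponentiation in evaluate_poly.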
if __name__ == "__main__":
lowercase__ = (0.0, 0.0, 5.0, 9.3, 7.0)
lowercase__ = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x)) | 96 |
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """
        Return the sum of array[start ... end] (illustrative doctest added).

        >>> PrefixSum([1, 2, 3]).get_sum(0, 2)
        6
        """
        if start == 0:
            return self.prefix_sum[end]

        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum: int) -> bool:
        """
        Return True if any contiguous subarray sums to target_sum (illustrative doctest added).

        >>> PrefixSum([1, 2, 3]).contains_sum(5)
        True
        >>> PrefixSum([1, 2, 3]).contains_sum(4)
        False
        """
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True

            sums.add(sum_item)

        return False
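# Why contains_sum works: a window array[i+1 ... j] sums to `target_sum` exactly when
# prefix[j] - prefix[i] == target_sum, so it suffices to look back for prefix[j] - target_sum.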
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 0 |
"""simple docstring"""
from typing import List
from .keymap import KEYMAP, get_character
def _snake_case ( snake_case__ : str ):
def decorator(snake_case__ : Optional[Any] ):
A = getattr(__lowercase , 'handle_key' , [] )
handle += [key]
setattr(__lowercase , 'handle_key' , __lowercase )
return func
return decorator
def _snake_case ( *snake_case__ : List[str] ):
def decorator(snake_case__ : Union[str, Any] ):
A = getattr(__lowercase , 'handle_key' , [] )
handle += keys
setattr(__lowercase , 'handle_key' , __lowercase )
return func
return decorator
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __new__( cls : Union[str, Any] ,A_ : str ,A_ : List[str] ,A_ : Any ) -> List[Any]:
A = super().__new__(cls ,A_ ,A_ ,A_ )
if not hasattr(A_ ,'key_handler' ):
setattr(A_ ,'key_handler' ,{} )
setattr(A_ ,'handle_input' ,KeyHandler.handle_input )
for value in attrs.values():
A = getattr(A_ ,'handle_key' ,[] )
for key in handled_keys:
A = value
return new_cls
@staticmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ) -> List[Any]:
A = get_character()
if char != KEYMAP["undefined"]:
A = ord(A_ )
A = cls.key_handler.get(A_ )
if handler:
A = char
return handler(cls )
else:
return None
def _snake_case ( cls : str ):
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() ) | 74 |
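# Usage sketch (assumption; the KEYMAP entry names are illustrative):
#
#   @register
#   class Menu:
#       @mark(KEYMAP["up"])
#       def move_up(cls):
#           ...
#
#       @mark_multiple(KEYMAP["down"], KEYMAP["newline"])
#       def move_down(cls):
#           ...
#
#   Menu.handle_input() reads a key via get_character() and dispatches to the function
#   registered for it, returning None when no handler matches.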
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module):
    """Dual transformer wrapper that combines two `Transformer2DModel`s for mixed inference."""

    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
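# Usage note (assumption, not from this file): pipelines such as VersatileDiffusion set
# `mix_ratio`, `condition_lengths`, and `transformer_index_for_condition` on this module and
# pass `encoder_hidden_states` that concatenates both condition embeddings along the token
# axis, matching the `condition_lengths` split performed in forward().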
from ..utils import DummyObject, requires_backends
# NOTE: this is an auto-generated "dummy objects" module: each public torch-backed symbol
# is replaced by a stub that raises a helpful error when torch is not installed. The
# concrete class and function names were lost in this copy of the file, so the placeholder
# names below (_TorchDummy*, _torch_dummy_fn*) are NOT the library's real symbols; only
# the structure of each stub is original.


class _TorchDummy1(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy2(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy3(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy4(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy5(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy6(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy7(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy8(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy9(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy10(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy11(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


def _torch_dummy_fn1(*args, **kwargs):
    requires_backends(_torch_dummy_fn1, ["torch"])


def _torch_dummy_fn2(*args, **kwargs):
    requires_backends(_torch_dummy_fn2, ["torch"])


def _torch_dummy_fn3(*args, **kwargs):
    requires_backends(_torch_dummy_fn3, ["torch"])


def _torch_dummy_fn4(*args, **kwargs):
    requires_backends(_torch_dummy_fn4, ["torch"])


def _torch_dummy_fn5(*args, **kwargs):
    requires_backends(_torch_dummy_fn5, ["torch"])


def _torch_dummy_fn6(*args, **kwargs):
    requires_backends(_torch_dummy_fn6, ["torch"])


def _torch_dummy_fn7(*args, **kwargs):
    requires_backends(_torch_dummy_fn7, ["torch"])


class _TorchDummy12(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy13(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy14(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy15(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy16(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy17(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy18(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy19(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy20(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy21(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])


class _TorchDummy22(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])

    @classmethod
    def from_config(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        requires_backends(cls, ["torch"])
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Any , *A : List[str] , **A : Any ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Tuple , *A : Optional[int] , **A : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : int , *A : Optional[Any] , **A : int ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , *A : Optional[int] , **A : Tuple ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Dict , *A : Optional[Any] , **A : Any ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : List[Any] , *A : List[str] , **A : int ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , *A : Any , **A : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[str] , *A : Dict , **A : Optional[int] ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : Union[str, Any] , *A : Optional[Any] , **A : Dict ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : str , *A : Dict , **A : int ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , *A : str , **A : List[Any] ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : List[Any] , *A : Union[str, Any] , **A : str ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Any , *A : Tuple , **A : Dict ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *A : str , **A : Optional[Any] ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : Union[str, Any] , *A : Optional[int] , **A : Union[str, Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , *A : int , **A : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , *A : Dict , **A : Any ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : int , *A : str , **A : List[str] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : int , *A : Union[str, Any] , **A : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Tuple , *A : Optional[Any] , **A : List[Any] ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : Optional[int] , *A : Optional[Any] , **A : Dict ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , *A : Optional[int] , **A : str ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , *A : Tuple , **A : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : Optional[int] , *A : Optional[Any] , **A : Optional[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Tuple , *A : Dict , **A : Any ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : str , *A : Optional[int] , **A : Optional[int] ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : Optional[int] , *A : List[Any] , **A : str ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : str , *A : int , **A : Optional[int] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , *A : str , **A : str ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : int , *A : Tuple , **A : Optional[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , *A : Dict , **A : int ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , *A : Any , **A : str ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : int , *A : Optional[Any] , **A : Dict ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : int , *A : Tuple , **A : Optional[int] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , *A : Any , **A : Union[str, Any] ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : Union[str, Any] , *A : Optional[Any] , **A : List[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Dict , *A : List[str] , **A : List[str] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , *A : Union[str, Any] , **A : Dict ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : List[Any] , *A : int , **A : List[str] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : int , *A : str , **A : Optional[int] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , *A : Union[str, Any] , **A : str ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : str , *A : Tuple , **A : List[str] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : int , *A : Optional[int] , **A : Optional[int] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : str , *A : Union[str, Any] , **A : Tuple ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : Union[str, Any] , *A : str , **A : List[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Dict , *A : int , **A : int ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , *A : str , **A : Tuple ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : int , *A : List[Any] , **A : List[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , *A : Optional[int] , **A : str ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , *A : Tuple , **A : Optional[Any] ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : Tuple , *A : List[str] , **A : Any ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : str , *A : str , **A : str ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[str] , *A : Optional[int] , **A : List[str] ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : Union[str, Any] , *A : Optional[int] , **A : str ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , *A : Any , **A : Dict ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Any , *A : Optional[Any] , **A : Any ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : List[str] , *A : Dict , **A : List[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : str , *A : int , **A : str ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : str , *A : int , **A : List[Any] ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : Dict , *A : str , **A : Optional[int] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *A : Any , **A : Tuple ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , *A : str , **A : Dict ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : str , *A : Any , **A : List[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , *A : int , **A : Dict ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Any , *A : Optional[int] , **A : Any ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : List[str] , *A : Optional[Any] , **A : Tuple ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Tuple , *A : Optional[Any] , **A : int ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , *A : Tuple , **A : Optional[int] ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : Union[str, Any] , *A : str , **A : Union[str, Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *A : Tuple , **A : List[str] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Tuple , *A : Optional[int] , **A : int ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : Dict , *A : Any , **A : Optional[int] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Any , *A : int , **A : Dict ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Tuple , *A : int , **A : List[str] ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : Optional[Any] , *A : Any , **A : Optional[Any] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : str , *A : Optional[int] , **A : Optional[int] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[str] , *A : List[str] , **A : Dict ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : List[Any] , *A : Optional[Any] , **A : Tuple ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , *A : str , **A : Optional[Any] ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , *A : str , **A : Optional[Any] ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : List[str] , *A : List[Any] , **A : Optional[int] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , *A : Union[str, Any] , **A : Any ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : int , *A : str , **A : str ):
requires_backends(cls , ["""torch"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["torch"]
def __init__( self : Optional[int] , *A : Optional[int] , **A : List[str] ):
requires_backends(self , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , *A : Dict , **A : Tuple ):
requires_backends(cls , ["""torch"""] )
@classmethod
def UpperCAmelCase__ ( cls : Dict , *A : str , **A : Tuple ):
requires_backends(cls , ["""torch"""] )
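# Illustrative sketch (not part of the original file): these dummy classes let
# `import` succeed without torch and defer the failure to first use. A minimal
# stand-in for `requires_backends` behaves like this:
def _requires_backends_sketch(obj, backends):
    import importlib.util

    missing = [name for name in backends if importlib.util.find_spec(name) is None]
    if missing:
        raise ImportError(f"{type(obj).__name__} requires the following backends: {', '.join(missing)}")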
| 111 |
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    # class and method names are descriptive stand-ins for the obfuscated originals
    def test_conversion_save_load(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inputs = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inputs)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_reloaded = model_reloaded.generate(**inputs)
            self.assertTrue(torch.allclose(output, output_reloaded))
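    # Usage sketch (illustrative, assumes the `optimum` package): the round-trip
    # exercised above looks like this in user code:
    #     model = model.to_bettertransformer()       # fused attention kernels
    #     out = model.generate(**inputs)
    #     model = model.reverse_bettertransformer()  # required before save_pretrained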
    def test_save_without_reversion_raises(self):
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            # the exception type is an assumption; transformers raises ValueError here
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname) | 282 | 0 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    # class name is illustrative; the attribute names follow TokenizerTesterMixin
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self) -> None:
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024  # assumption: the bare `1024` sets the unicode vocab size
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Tranformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)
    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weater?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                shutil.rmtree(tmpdirname)
        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = " He is very happy, UNwant\u00E9d,running"
                additional_special_tokens = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)
                shutil.rmtree(tmpdirname)
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)
                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)
                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)
                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)
                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)
                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)
                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})
                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)
                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
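    # Sketch (illustrative) of the two registration paths checked above:
    #     tokenizer.add_tokens([tok], special_tokens=True)                    # -> unique_no_split_tokens
    #     tokenizer.add_special_tokens({"additional_special_tokens": [tok]})  # -> all_special_tokens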
    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = AddedToken(chr(NEW_TOKEN), lstrip=True)  # lstrip value is an assumption
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        # method name is a descriptive stand-in for the obfuscated original
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)
                # a special token for Canine can be defined as follows:
                new_token_1 = chr(0xE006)
                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]
                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )
                new_token_2 = chr(0xE007)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )
                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )
    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text = "hello world"
                if self.space_between_special_tokens:
                    output_text = "[CLS] hello world [SEP]"
                else:
                    output_text = input_text
                encoded = tokenizer.encode(input_text, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output_text, output_text.lower()])
    def test_special_token_attributes(self):
        # method name is a descriptive stand-in; the first setattr phase clearing each
        # attribute to None is an assumption based on the paired assertions below
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token = "a"
                token_id = ord(token)
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id)
                    self.assertEqual(getattr(tokenizer, attr), token)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(
                    getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id]
                )
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self ) -> List[Any]:
'''simple docstring'''
pass
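# Sketch: CANINE tokenizes at the Unicode code point level, so ids are just ord(char)
# wrapped with CLS (0xE000 == 57344) and SEP (0xE001 == 57345), which is exactly the
# expected list in test_prepare_batch_integration above.
def _canine_like_ids(text, cls_id=0xE000, sep_id=0xE001):
    return [cls_id] + [ord(char) for char in text] + [sep_id]


assert _canine_like_ids("Life")[:3] == [57344, 76, 105]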
| 109 |
import random
import torch
from huggingface_hub import HfApi
from diffusers import UNet2DModel


api = HfApi()
results = {}  # expected output slices; the tensors below belong in this dict, keyed by normalized model id
# fmt: off
_lowerCamelCase : List[Any] = torch.tensor([
-0.7_5_1_5, -1.6_8_8_3, 0.2_4_2_0, 0.0_3_0_0, 0.6_3_4_7, 1.3_4_3_3, -1.1_7_4_3, -3.7_4_6_7,
1.2_3_4_2, -2.2_4_8_5, 0.4_6_3_6, 0.8_0_7_6, -0.7_9_9_1, 0.3_9_6_9, 0.8_4_9_8, 0.9_1_8_9,
-1.8_8_8_7, -3.3_5_2_2, 0.7_6_3_9, 0.2_0_4_0, 0.6_2_7_1, -2.7_1_4_8, -1.6_3_1_6, 3.0_8_3_9,
0.3_1_8_6, 0.2_7_2_1, -0.9_7_5_9, -1.2_4_6_1, 2.6_2_5_7, 1.3_5_5_7
])
_lowerCamelCase : int = torch.tensor([
-2.3_6_3_9, -2.5_3_4_4, 0.0_0_5_4, -0.6_6_7_4, 1.5_9_9_0, 1.0_1_5_8, 0.3_1_2_4, -2.1_4_3_6,
1.8_7_9_5, -2.5_4_2_9, -0.1_5_6_6, -0.3_9_7_3, 1.2_4_9_0, 2.6_4_4_7, 1.2_2_8_3, -0.5_2_0_8,
-2.8_1_5_4, -3.5_1_1_9, 2.3_8_3_8, 1.2_0_3_3, 1.7_2_0_1, -2.1_2_5_6, -1.4_5_7_6, 2.7_9_4_8,
2.4_2_0_4, -0.9_7_5_2, -1.2_5_4_6, 0.8_0_2_7, 3.2_7_5_8, 3.1_3_6_5
])
_lowerCamelCase : Optional[int] = torch.tensor([
-0.6_5_3_1, -0.6_8_9_1, -0.3_1_7_2, -0.5_3_7_5, -0.9_1_4_0, -0.5_3_6_7, -0.1_1_7_5, -0.7_8_6_9,
-0.3_8_0_8, -0.4_5_1_3, -0.2_0_9_8, -0.0_0_8_3, 0.3_1_8_3, 0.5_1_4_0, 0.2_2_4_7, -0.1_3_0_4,
-0.1_3_0_2, -0.2_8_0_2, -0.2_0_8_4, -0.2_0_2_5, -0.4_9_6_7, -0.4_8_7_3, -0.0_8_6_1, 0.6_9_2_5,
0.0_2_5_0, 0.1_2_9_0, -0.1_5_4_3, 0.6_3_1_6, 1.0_4_6_0, 1.4_9_4_3
])
_lowerCamelCase : Dict = torch.tensor([
0.0_9_1_1, 0.1_1_0_7, 0.0_1_8_2, 0.0_4_3_5, -0.0_8_0_5, -0.0_6_0_8, 0.0_3_8_1, 0.2_1_7_2,
-0.0_2_8_0, 0.1_3_2_7, -0.0_2_9_9, -0.0_2_5_5, -0.0_0_5_0, -0.1_1_7_0, -0.1_0_4_6, 0.0_3_0_9,
0.1_3_6_7, 0.1_7_2_8, -0.0_5_3_3, -0.0_7_4_8, -0.0_5_3_4, 0.1_6_2_4, 0.0_3_8_4, -0.1_8_0_5,
-0.0_7_0_7, 0.0_6_4_2, 0.0_2_2_0, -0.0_1_3_4, -0.1_3_3_3, -0.1_5_0_5
])
_lowerCamelCase : Dict = torch.tensor([
0.1_3_2_1, 0.1_3_3_7, 0.0_4_4_0, 0.0_6_2_2, -0.0_5_9_1, -0.0_3_7_0, 0.0_5_0_3, 0.2_1_3_3,
-0.0_1_7_7, 0.1_4_1_5, -0.0_1_1_6, -0.0_1_1_2, 0.0_0_4_4, -0.0_9_8_0, -0.0_7_8_9, 0.0_3_9_5,
0.1_5_0_2, 0.1_7_8_5, -0.0_4_8_8, -0.0_5_1_4, -0.0_4_0_4, 0.1_5_3_9, 0.0_4_5_4, -0.1_5_5_9,
-0.0_6_6_5, 0.0_6_5_9, 0.0_3_8_3, -0.0_0_0_5, -0.1_2_6_6, -0.1_3_8_6
])
_lowerCamelCase : List[Any] = torch.tensor([
0.1_1_5_4, 0.1_2_1_8, 0.0_3_0_7, 0.0_5_2_6, -0.0_7_1_1, -0.0_5_4_1, 0.0_3_6_6, 0.2_0_7_8,
-0.0_2_6_7, 0.1_3_1_7, -0.0_2_2_6, -0.0_1_9_3, -0.0_0_1_4, -0.1_0_5_5, -0.0_9_0_2, 0.0_3_3_0,
0.1_3_9_1, 0.1_7_0_9, -0.0_5_6_2, -0.0_6_9_3, -0.0_5_6_0, 0.1_4_8_2, 0.0_3_8_1, -0.1_6_8_3,
-0.0_6_8_1, 0.0_6_6_1, 0.0_3_3_1, -0.0_0_4_6, -0.1_2_6_8, -0.1_4_3_1
])
_lowerCamelCase : Dict = torch.tensor([
0.1_1_9_2, 0.1_2_4_0, 0.0_4_1_4, 0.0_6_0_6, -0.0_5_5_7, -0.0_4_1_2, 0.0_4_3_0, 0.2_0_4_2,
-0.0_2_0_0, 0.1_3_8_5, -0.0_1_1_5, -0.0_1_3_2, 0.0_0_1_7, -0.0_9_6_5, -0.0_8_0_2, 0.0_3_9_8,
0.1_4_3_3, 0.1_7_4_7, -0.0_4_5_8, -0.0_5_3_3, -0.0_4_0_7, 0.1_5_4_5, 0.0_4_1_9, -0.1_5_7_4,
-0.0_6_4_5, 0.0_6_2_6, 0.0_3_4_1, -0.0_0_1_0, -0.1_1_9_9, -0.1_3_9_0
])
_lowerCamelCase : int = torch.tensor([
0.1_0_7_5, 0.1_0_7_4, 0.0_2_0_5, 0.0_4_3_1, -0.0_7_7_4, -0.0_6_0_7, 0.0_2_9_8, 0.2_0_4_2,
-0.0_3_2_0, 0.1_2_6_7, -0.0_2_8_1, -0.0_2_5_0, -0.0_0_6_4, -0.1_0_9_1, -0.0_9_4_6, 0.0_2_9_0,
0.1_3_2_8, 0.1_6_5_0, -0.0_5_8_0, -0.0_7_3_8, -0.0_5_8_6, 0.1_4_4_0, 0.0_3_3_7, -0.1_7_4_6,
-0.0_7_1_2, 0.0_6_0_5, 0.0_2_5_0, -0.0_0_9_9, -0.1_3_1_6, -0.1_4_7_3
])
_lowerCamelCase : int = torch.tensor([
-1.4_5_7_2, -2.0_4_8_1, -0.0_4_1_4, -0.6_0_0_5, 1.4_1_3_6, 0.5_8_4_8, 0.4_0_2_8, -2.7_3_3_0,
1.2_2_1_2, -2.1_2_2_8, 0.2_1_5_5, 0.4_0_3_9, 0.7_6_6_2, 2.0_5_3_5, 0.7_4_7_7, -0.3_2_4_3,
-2.1_7_5_8, -2.7_6_4_8, 1.6_9_4_7, 0.7_0_2_6, 1.2_3_3_8, -1.6_0_7_8, -0.8_6_8_2, 2.2_8_1_0,
1.8_5_7_4, -0.5_7_1_8, -0.5_5_8_6, -0.0_1_8_6, 2.3_4_1_5, 2.1_2_5_1])
_lowerCamelCase : Tuple = torch.tensor([
-1.3_6_9_0, -1.9_7_2_0, -0.4_0_9_0, -0.6_9_6_6, 1.4_6_6_0, 0.9_9_3_8, -0.1_3_8_5, -2.7_3_2_4,
0.7_7_3_6, -1.8_9_1_7, 0.2_9_2_3, 0.4_2_9_3, 0.1_6_9_3, 1.4_1_1_2, 1.1_8_8_7, -0.3_1_8_1,
-2.2_1_6_0, -2.6_3_8_1, 1.3_1_7_0, 0.8_1_6_3, 0.9_2_4_0, -1.6_5_4_4, -0.6_0_9_9, 2.5_2_5_9,
1.6_4_3_0, -0.9_0_9_0, -0.9_3_9_2, -0.0_1_2_6, 2.4_2_6_8, 2.3_2_6_6
])
_lowerCamelCase : List[str] = torch.tensor([
-1.3_5_2_5, -1.9_6_2_8, -0.3_9_5_6, -0.6_8_6_0, 1.4_6_6_4, 1.0_0_1_4, -0.1_2_5_9, -2.7_2_1_2,
0.7_7_7_2, -1.8_8_1_1, 0.2_9_9_6, 0.4_3_8_8, 0.1_7_0_4, 1.4_0_2_9, 1.1_7_0_1, -0.3_0_2_7,
-2.2_0_5_3, -2.6_2_8_7, 1.3_3_5_0, 0.8_1_3_1, 0.9_2_7_4, -1.6_2_9_2, -0.6_0_9_8, 2.5_1_3_1,
1.6_5_0_5, -0.8_9_5_8, -0.9_2_9_8, -0.0_1_5_1, 2.4_2_5_7, 2.3_3_5_5
])
_lowerCamelCase : int = torch.tensor([
-2.0_5_8_5, -2.7_8_9_7, -0.2_8_5_0, -0.8_9_4_0, 1.9_0_5_2, 0.5_7_0_2, 0.6_3_4_5, -3.8_9_5_9,
1.5_9_3_2, -3.2_3_1_9, 0.1_9_7_4, 0.0_2_8_7, 1.7_5_6_6, 2.6_5_4_3, 0.8_3_8_7, -0.5_3_5_1,
-3.2_7_3_6, -4.3_3_7_5, 2.9_0_2_9, 1.6_3_9_0, 1.4_6_4_0, -2.1_7_0_1, -1.9_0_1_3, 2.9_3_4_1,
3.4_9_8_1, -0.6_2_5_5, -1.1_6_4_4, -0.1_5_9_1, 3.7_0_9_7, 3.2_0_6_6
])
_lowerCamelCase : Tuple = torch.tensor([
-2.3_1_3_9, -2.5_5_9_4, -0.0_1_9_7, -0.6_7_8_5, 1.7_0_0_1, 1.1_6_0_6, 0.3_0_7_5, -2.1_7_4_0,
1.8_0_7_1, -2.5_6_3_0, -0.0_9_2_6, -0.3_8_1_1, 1.2_1_1_6, 2.6_2_4_6, 1.2_7_3_1, -0.5_3_9_8,
-2.8_1_5_3, -3.6_1_4_0, 2.3_8_9_3, 1.3_2_6_2, 1.6_2_5_8, -2.1_8_5_6, -1.3_2_6_7, 2.8_3_9_5,
2.3_7_7_9, -1.0_6_2_3, -1.2_4_6_8, 0.8_9_5_9, 3.3_3_6_7, 3.2_2_4_3
])
_lowerCamelCase : int = torch.tensor([
-2.0_6_2_8, -2.7_6_6_7, -0.2_0_8_9, -0.8_2_6_3, 2.0_5_3_9, 0.5_9_9_2, 0.6_4_9_5, -3.8_3_3_6,
1.6_0_2_5, -3.2_8_1_7, 0.1_7_2_1, -0.0_6_3_3, 1.7_5_1_6, 2.7_0_3_9, 0.8_1_0_0, -0.5_9_0_8,
-3.2_1_1_3, -4.4_3_4_3, 2.9_2_5_7, 1.3_6_3_2, 1.5_5_6_2, -2.1_4_8_9, -1.9_8_9_4, 3.0_5_6_0,
3.3_3_9_6, -0.7_3_2_8, -1.0_4_1_7, 0.0_3_8_3, 3.7_0_9_3, 3.2_3_4_3
])
_lowerCamelCase : List[Any] = torch.tensor([
-1.4_5_7_4, -2.0_5_6_9, -0.0_4_7_3, -0.6_1_1_7, 1.4_0_1_8, 0.5_7_6_9, 0.4_1_2_9, -2.7_3_4_4,
1.2_2_4_1, -2.1_3_9_7, 0.2_0_0_0, 0.3_9_3_7, 0.7_6_1_6, 2.0_4_5_3, 0.7_3_2_4, -0.3_3_9_1,
-2.1_7_4_6, -2.7_7_4_4, 1.6_9_6_3, 0.6_9_2_1, 1.2_1_8_7, -1.6_1_7_2, -0.8_8_7_7, 2.2_4_3_9,
1.8_4_7_1, -0.5_8_3_9, -0.5_6_0_5, -0.0_4_6_4, 2.3_2_5_0, 2.1_2_1_9
])
# fmt: on
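# Sketch of the key derivation used in the allclose check below: a model id such as
# "google/ddpm-cifar10-32" (illustrative) maps to the results key "google_ddpm_cifar10_32".
def _results_key(model_id: str) -> str:
    return "_".join("_".join(model_id.split("/")).split("-"))


assert _results_key("google/ddpm-cifar10-32") == "google_ddpm_cifar10_32"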
models = api.list_models(filter="diffusers")
for mod in models:
    if "google" in mod.author or mod.modelId == "CompVis/ldm-celebahq-256":
        local_checkpoint = "/home/patrick/google_checkpoints/" + mod.modelId.split("/")[-1]
        print(f"Started running {mod.modelId}!!!")
        if mod.modelId.startswith("CompVis"):
            model = UNet2DModel.from_pretrained(local_checkpoint, subfolder="unet")
        else:
            model = UNet2DModel.from_pretrained(local_checkpoint)
        torch.manual_seed(0)
        random.seed(0)
        noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        time_step = torch.tensor([10] * noise.shape[0])
        with torch.no_grad():
            logits = model(noise, time_step).sample
        assert torch.allclose(
            logits[0, 0, 0, :30], results["_".join("_".join(mod.modelId.split("/")).split("-"))], atol=1e-3
        )
        print(f"{mod.modelId} has passed successfully!!!") | 282 | 0 |
"""simple docstring"""
def UpperCamelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
if not numbers:
return 0
if not isinstance(__lowercase , (list, tuple) ) or not all(
isinstance(__lowercase , __lowercase ) for number in numbers ):
raise ValueError("numbers must be an iterable of integers" )
a_ = a_ = a_ = numbers[0]
for i in range(1 , len(__lowercase ) ):
# update the maximum and minimum subarray products
a_ = numbers[i]
if number < 0:
a_ , a_ = min_till_now, max_till_now
a_ = max(__lowercase , max_till_now * number )
a_ = min(__lowercase , min_till_now * number )
# update the maximum product found till now
a_ = max(__lowercase , __lowercase )
return max_prod | 243 |
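# Worked example for max_product_subarray above: in [2, 3, -2, 4] the best subarray is
# [2, 3] with product 6, while in [-2, 3, -4] the max/min swap on negatives lets the
# whole array reach (-2 * 3) * -4 == 24.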
import inspect
import unittest

from transformers import MobileNetV2Config
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation, MobileNetV2Model
    from transformers.models.mobilenet_v2.modeling_mobilenet_v2 import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import MobileNetV2ImageProcessor
class MobileNetV2ConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "tf_padding"))
        self.parent.assertTrue(hasattr(config, "depth_multiplier"))
class MobileNetV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=32,
        depth_multiplier=0.25,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        tf_padding=True,
        hidden_act="relu6",
        last_hidden_size=1280,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier)
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        return MobileNetV2Config(
            num_channels=self.num_channels,
            image_size=self.image_size,
            depth_multiplier=self.depth_multiplier,
            depth_divisible_by=self.depth_divisible_by,
            min_depth=self.min_depth,
            expand_ratio=self.expand_ratio,
            output_stride=self.output_stride,
            first_layer_is_expansion=self.first_layer_is_expansion,
            finegrained_output=self.finegrained_output,
            hidden_act=self.hidden_act,
            tf_padding=self.tf_padding,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileNetV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.last_hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileNetV2ForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
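# Sketch: with the tester defaults above (depth_multiplier=0.25, last_hidden_size=1280,
# finegrained_output=True) the checked hidden size stays 1280; without finegrained
# output it would shrink to int(1280 * 0.25).
assert int(1280 * 0.25) == 320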
@require_torch
class MobileNetV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (MobileNetV2Model, MobileNetV2ForImageClassification, MobileNetV2ForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileNetV2Model,
            "image-classification": MobileNetV2ForImageClassification,
            "image-segmentation": MobileNetV2ForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # the four flags below follow ModelTesterMixin conventions (assumed mapping)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = MobileNetV2ModelTester(self)
        self.config_tester = MobileNetV2ConfigTester(self, config_class=MobileNetV2Config, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="MobileNetV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="MobileNetV2 does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 16
            self.assertEqual(len(hidden_states), expected_num_stages)

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
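# Sketch of the spatial bookkeeping behind the shape assertions above: an encoder with
# output stride S maps an H x W input to roughly H // S x W // S feature maps.
def _expected_map_size(image_size, output_stride):
    return image_size // output_stride


assert _expected_map_size(32, 32) == 1  # the tester defaults above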
@require_torch
@require_vision
class MobileNetV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            MobileNetV2ImageProcessor.from_pretrained("google/mobilenet_v2_1.0_224") if is_vision_available() else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = MobileNetV2ForImageClassification.from_pretrained("google/mobilenet_v2_1.0_224").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1001))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileNetV2ForSemanticSegmentation.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        model = model.to(torch_device)
        image_processor = MobileNetV2ImageProcessor.from_pretrained("google/deeplabv3_mobilenet_v2_1.0_513")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 65, 65))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ],
            device=torch_device,
        )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4)) | 282 | 0 |
import os
import unittest
from tempfile import TemporaryDirectory
import torch
import torch.nn as nn
from accelerate.utils import (
OffloadedWeightsLoader,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
)
class ModelForTest(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear1 = nn.Linear(3, 4)
        self.batchnorm = nn.BatchNorm1d(4)
        self.linear2 = nn.Linear(4, 5)

    def forward(self, x):
        return self.linear2(self.batchnorm(self.linear1(x)))
class OffloadTester(unittest.TestCase):
    # class and method names are descriptive stand-ins for the obfuscated originals
    def test_offload_state_dict(self):
        model = ModelForTest()
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, model.state_dict())
            index_file = os.path.join(tmp_dir, "index.json")
            self.assertTrue(os.path.isfile(index_file))
            # TODO: add tests on what is inside the index
            for key in ["linear1.weight", "linear1.bias", "linear2.weight", "linear2.bias"]:
                weight_file = os.path.join(tmp_dir, f"{key}.dat")
                self.assertTrue(os.path.isfile(weight_file))
            # TODO: add tests on the fact weights are properly loaded
    def test_offload_weight(self):
        dtypes = [torch.float16, torch.float32, torch.bfloat16]
        for dtype in dtypes:
            weight = torch.randn(2, 3, dtype=dtype)
            with TemporaryDirectory() as tmp_dir:
                index = offload_weight(weight, "weight", tmp_dir, {})
                weight_file = os.path.join(tmp_dir, "weight.dat")
                self.assertTrue(os.path.isfile(weight_file))
                self.assertDictEqual(index, {"weight": {"shape": [2, 3], "dtype": str(dtype).split(".")[1]}})
                new_weight = load_offloaded_weight(weight_file, index["weight"])
                self.assertTrue(torch.equal(weight, new_weight))
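    # On-disk layout exercised by these tests (an assumption about accelerate's format):
    #     tmp_dir/
    #         index.json    # {"weight": {"shape": [2, 3], "dtype": "float16"}, ...}
    #         weight.dat    # raw buffer, memory-mapped back by load_offloaded_weight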
    def test_offload_weights_loader(self):
        model = ModelForTest()
        state_dict = model.state_dict()
        cpu_part = {k: v for k, v in state_dict.items() if "linear2" not in k}
        disk_part = {k: v for k, v in state_dict.items() if "linear2" in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        cpu_part = {k: v for k, v in state_dict.items() if "weight" in k}
        disk_part = {k: v for k, v in state_dict.items() if "weight" not in k}
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, disk_part)
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
        with TemporaryDirectory() as tmp_dir:
            offload_state_dict(tmp_dir, state_dict)
            # Duplicates are removed
            weight_map = OffloadedWeightsLoader(state_dict=cpu_part, save_folder=tmp_dir)
            # Every key is there with the right value
            self.assertEqual(sorted(weight_map), sorted(state_dict.keys()))
            for key, param in state_dict.items():
                self.assertTrue(torch.allclose(param, weight_map[key]))
    def test_extract_submodules_state_dict(self):
        state_dict = {"a.1": 0, "a.10": 1, "a.2": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1": 0, "a.2": 2})
        state_dict = {"a.1.a": 0, "a.10.a": 1, "a.2.a": 2}
        extracted = extract_submodules_state_dict(state_dict, ["a.1", "a.2"])
        self.assertDictEqual(extracted, {"a.1.a": 0, "a.2.a": 2}) | 145 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    # set parameter of one layer
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def a_ ( __lowercase : Any , __lowercase : Dict , __lowercase : Union[str, Any] ) -> Optional[Any]:
# set torch weights for 1-to-1 comparison
_snake_case = np.asarray(weights[0] )
_snake_case = np.asarray(weights[1] )
_snake_case = np.asarray(weights[2] )
set_param(
torch_layer.self_attention.query_key , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(__lowercase ).view(-1 , __lowercase ).contiguous().transpose(0 , 1 ) , )
def a_ ( __lowercase : str , __lowercase : Tuple , __lowercase : Any ) -> Optional[Any]:
# set torch weights for 1-to-1 comparison
_snake_case = np.asarray(weights[0] )
_snake_case = np.asarray(weights[1] )
_snake_case = np.asarray(weights[2] )
_snake_case = np.asarray(weights[3] )
set_param(
torch_layer.self_attention.query , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.key , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.self_attention.value , torch.tensor(__lowercase ).transpose(1 , 2 ).contiguous().view(-1 , __lowercase ) , )
set_param(
torch_layer.output.dense , torch.tensor(__lowercase ).view(-1 , __lowercase ).contiguous().transpose(0 , 1 ) , )
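# --- Illustration (not part of the conversion script) -------------------------
# Toy shape check for the reshapes above, assuming trax stores per-head
# projection weights as (num_heads, d_model, d_head) while the HF nn.Linear
# weight for query/key/value is (num_heads * d_head, d_model) and the output
# projection weight is the transpose, (d_model, num_heads * d_head).
import torch

num_heads, d_model, d_head = 2, 8, 3
trax_qkv = torch.randn(num_heads, d_model, d_head)
hf_qkv = trax_qkv.transpose(1, 2).contiguous().view(-1, d_model)
assert hf_qkv.shape == (num_heads * d_head, d_model)

trax_out = torch.randn(num_heads, d_head, d_model)
hf_out = trax_out.view(-1, d_model).contiguous().transpose(0, 1)
assert hf_out.shape == (d_model, num_heads * d_head)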
def a_ ( __lowercase : Dict , __lowercase : List[str] , __lowercase : Union[str, Any] ) -> Optional[Any]:
# layernorm 1
_snake_case = weights[0][0][0]
_snake_case = np.asarray(layer_norm_a[0] )
_snake_case = np.asarray(layer_norm_a[1] )
set_param(
torch_block.attention.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# lsh weights + output
_snake_case = weights[0][1]
if len(__lowercase ) < 4:
set_layer_weights_in_torch_lsh(__lowercase , torch_block.attention , __lowercase )
else:
set_layer_weights_in_torch_local(__lowercase , torch_block.attention , __lowercase )
# intermediate weights
_snake_case = weights[2][0][1][2]
# Chunked Feed Forward
if len(__lowercase ) == 4:
_snake_case = intermediate_weights[2]
# layernorm 2
_snake_case = np.asarray(intermediate_weights[0][0] )
_snake_case = np.asarray(intermediate_weights[0][1] )
set_param(
torch_block.feed_forward.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# intermediate dense
_snake_case = np.asarray(intermediate_weights[1][0] )
_snake_case = np.asarray(intermediate_weights[1][1] )
set_param(
torch_block.feed_forward.dense.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
# intermediate out
_snake_case = np.asarray(intermediate_weights[4][0] )
_snake_case = np.asarray(intermediate_weights[4][1] )
set_param(
torch_block.feed_forward.output.dense , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
def a_ ( __lowercase : Tuple , __lowercase : Tuple , __lowercase : Dict ) -> Optional[int]:
# reformer model
_snake_case = torch_model.reformer
# word embeds
_snake_case = np.asarray(weights[1] )
set_param(
torch_model_reformer.embeddings.word_embeddings , torch.tensor(__lowercase ) , )
if isinstance(weights[3] , __lowercase ):
_snake_case = torch_model_reformer.embeddings.position_embeddings
for emb_idx in range(len(position_embeddings.weights ) ):
_snake_case = np.asarray(weights[3][emb_idx][0] )
assert (
position_embeddings.weights[emb_idx].shape == emb_weights.shape
), f'''{position_embeddings[emb_idx]} emb does not match'''
_snake_case = nn.Parameter(torch.tensor(__lowercase ) )
_snake_case = weights[5]
assert len(torch_model_reformer.encoder.layers ) * 4 == len(
__lowercase ), "HF and trax model do not have the same number of layers"
for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
_snake_case = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
set_block_weights_in_torch(__lowercase , __lowercase , __lowercase )
# output layer norm
_snake_case = np.asarray(weights[7][0] )
_snake_case = np.asarray(weights[7][1] )
set_param(
torch_model_reformer.encoder.layer_norm , torch.tensor(__lowercase ) , torch.tensor(__lowercase ) , )
# output embeddings
_snake_case = np.asarray(weights[9][0] )
_snake_case = np.asarray(weights[9][1] )
set_param(
torch_model.lm_head.decoder , torch.tensor(__lowercase ).transpose(0 , 1 ).contiguous() , torch.tensor(__lowercase ) , )
def a_ ( __lowercase : Optional[Any] , __lowercase : Dict , __lowercase : List[Any] ) -> Optional[int]:
# Initialise PyTorch model
_snake_case = ReformerConfig.from_json_file(__lowercase )
print(f'''Building PyTorch model from configuration: {config}''' )
_snake_case = ReformerModelWithLMHead(__lowercase )
with open(__lowercase , 'rb' ) as f:
_snake_case = pickle.load(__lowercase )['weights']
set_model_weights_in_torch(__lowercase , __lowercase , config.hidden_size )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __lowercase )
if __name__ == "__main__":
_lowerCamelCase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the trax model pickle (.pkl) file.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowerCamelCase : List[Any] = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 282 | 0 |
"""simple docstring"""
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_A : int = False
class a__ ( unittest.TestCase ):
def __magic_name__ ( self , _a=32 ):
set_seed(0 )
lowercase : List[Any] = UNetaDModel(sample_size=_a , in_channels=3 , out_channels=3 )
lowercase : int = torch.optim.SGD(model.parameters() , lr=0.0_0_0_1 )
return model, optimizer
@slow
def __magic_name__ ( self ):
lowercase : Optional[int] = "cpu" # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
lowercase : Optional[Any] = DDPMScheduler(
num_train_timesteps=1_000 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule="linear" , clip_sample=_a , )
lowercase : List[Any] = DDIMScheduler(
num_train_timesteps=1_000 , beta_start=0.0_0_0_1 , beta_end=0.0_2 , beta_schedule="linear" , clip_sample=_a , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
lowercase : List[Any] = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(_a ) for _ in range(4 )]
lowercase : Dict = [torch.randn((4, 3, 32, 32) ).to(_a ) for _ in range(4 )]
lowercase : Tuple = [torch.randint(0 , 1_000 , (4,) ).long().to(_a ) for _ in range(4 )]
# train with a DDPM scheduler
lowercase , lowercase : List[Any] = self.get_model_optimizer(resolution=32 )
model.train().to(_a )
for i in range(4 ):
optimizer.zero_grad()
lowercase : Any = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowercase : List[Any] = model(_a , timesteps[i] ).sample
lowercase : str = torch.nn.functional.mse_loss(_a , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
lowercase , lowercase : Union[str, Any] = self.get_model_optimizer(resolution=32 )
model.train().to(_a )
for i in range(4 ):
optimizer.zero_grad()
lowercase : List[str] = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
lowercase : Optional[int] = model(_a , timesteps[i] ).sample
lowercase : List[Any] = torch.nn.functional.mse_loss(_a , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(_a , _a , atol=1E-5 ) )
self.assertTrue(torch.allclose(_a , _a , atol=1E-5 ) )
| 202 |
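# --- Illustration (not part of the test above) -------------------------------
# Sketch of what `add_noise` computes inside the training loops: the closed-form
# DDPM forward process q(x_t | x_0) under the linear beta schedule configured in
# the test. DDPM and DDIM share this formula, which is what makes the two
# training runs comparable.
import torch

betas = torch.linspace(0.0001, 0.02, 1_000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

def add_noise_sketch(clean_images, noise, timesteps):
    a_bar = alphas_cumprod[timesteps].view(-1, 1, 1, 1)
    return a_bar.sqrt() * clean_images + (1.0 - a_bar).sqrt() * noise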
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def a_ ( __lowercase : Dict ) -> List[Any]:
_snake_case = args.pruning_method
_snake_case = args.threshold
_snake_case = args.model_name_or_path.rstrip('/' )
_snake_case = args.target_model_path
print(f'''Load fine-pruned model from {model_name_or_path}''' )
_snake_case = torch.load(os.path.join(__lowercase , 'pytorch_model.bin' ) )
_snake_case = {}
for name, tensor in model.items():
if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
elif "classifier" in name or "qa_output" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
elif "bias" in name:
_snake_case = tensor
print(f'''Copied layer {name}''' )
else:
if pruning_method == "magnitude":
_snake_case = MagnitudeBinarizer.apply(inputs=__lowercase , threshold=__lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "topK":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case = TopKBinarizer.apply(__lowercase , __lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "sigmoied_threshold":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case = ThresholdBinarizer.apply(__lowercase , __lowercase , __lowercase )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
elif pruning_method == "l0":
if "mask_scores" in name:
continue
_snake_case = name[:-6]
_snake_case = model[f'''{prefix_}mask_scores''']
_snake_case , _snake_case = -0.1, 1.1
_snake_case = torch.sigmoid(__lowercase )
_snake_case = s * (r - l) + l
_snake_case = s_bar.clamp(min=0.0 , max=1.0 )
_snake_case = tensor * mask
print(f'''Pruned layer {name}''' )
else:
raise ValueError('Unknown pruning method' )
if target_model_path is None:
_snake_case = os.path.join(
os.path.dirname(__lowercase ) , f'''bertarized_{os.path.basename(__lowercase )}''' )
if not os.path.isdir(__lowercase ):
shutil.copytree(__lowercase , __lowercase )
print(f'''\nCreated folder {target_model_path}''' )
torch.save(__lowercase , os.path.join(__lowercase , 'pytorch_model.bin' ) )
print('\nPruned model saved! See you later!' )
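# --- Illustration (not part of the script above) ------------------------------
# Standalone version of the "l0" branch: learned mask scores are pushed through
# a stretched sigmoid (hard-concrete gate with l = -0.1, r = 1.1) and clamped
# back to [0, 1], so most gates saturate to exactly 0 or 1.
import torch

def l0_mask_sketch(mask_scores: torch.Tensor) -> torch.Tensor:
    l, r = -0.1, 1.1
    s = torch.sigmoid(mask_scores)
    s_bar = s * (r - l) + l  # stretch (0, 1) to (l, r)
    return s_bar.clamp(min=0.0, max=1.0)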
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
'''--pruning_method''',
choices=['''l0''', '''magnitude''', '''topK''', '''sigmoied_threshold'''],
type=str,
required=True,
help=(
'''Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,'''
''' sigmoied_threshold = Soft movement pruning)'''
),
)
parser.add_argument(
'''--threshold''',
type=float,
required=False,
help=(
'''For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model. '''
'''For `sigmoied_threshold`, it is the threshold tau against which the (sigmoided) scores are compared. '''
'''Not needed for `l0`.'''
),
)
parser.add_argument(
'''--model_name_or_path''',
type=str,
required=True,
help='''Folder containing the model that was previously fine-pruned''',
)
parser.add_argument(
'''--target_model_path''',
default=None,
type=str,
required=False,
help='''Folder where the pruned ("bertarized") model will be saved''',
)
_lowerCamelCase : int = parser.parse_args()
main(args) | 282 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowercase__ = {
'''configuration_gpt_neo''': ['''GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTNeoConfig''', '''GPTNeoOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'''GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTNeoForCausalLM''',
'''GPTNeoForQuestionAnswering''',
'''GPTNeoForSequenceClassification''',
'''GPTNeoForTokenClassification''',
'''GPTNeoModel''',
'''GPTNeoPreTrainedModel''',
'''load_tf_weights_in_gpt_neo''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = [
'''FlaxGPTNeoForCausalLM''',
'''FlaxGPTNeoModel''',
'''FlaxGPTNeoPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 151 |
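# --- Illustration (not part of the module above) ------------------------------
# A minimal sketch of the lazy-import pattern behind `_LazyModule`: the module
# object resolves attribute access by importing the owning submodule on demand,
# so the package import stays cheap until a class is actually used. This is a
# simplification, not transformers' real implementation.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(submodule, attr)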
import unittest
from typing import Tuple
import torch
from diffusers.utils import floats_tensor, randn_tensor, torch_all_close, torch_device
from diffusers.utils.testing_utils import require_torch
@require_torch
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
@property
def A ( self : List[str] ):
'''simple docstring'''
return self.get_dummy_input()
@property
def A ( self : Any ):
'''simple docstring'''
if self.block_type == "down":
return (4, 32, 16, 16)
elif self.block_type == "mid":
return (4, 32, 32, 32)
elif self.block_type == "up":
return (4, 32, 64, 64)
raise ValueError(f'''\'{self.block_type}\' is not a supported block_type. Set it to \'up\', \'mid\', or \'down\'.''' )
def A ( self : Union[str, Any] , lowercase : Any=True , lowercase : List[Any]=False , lowercase : List[str]=False , lowercase : Dict=False , ):
'''simple docstring'''
_snake_case = 4
_snake_case = 32
_snake_case = (32, 32)
_snake_case = torch.manual_seed(0 )
_snake_case = torch.device(lowercase )
_snake_case = (batch_size, num_channels) + sizes
_snake_case = randn_tensor(lowercase , generator=lowercase , device=lowercase )
_snake_case = {'hidden_states': hidden_states}
if include_temb:
_snake_case = 128
_snake_case = randn_tensor((batch_size, temb_channels) , generator=lowercase , device=lowercase )
if include_res_hidden_states_tuple:
_snake_case = torch.manual_seed(1 )
_snake_case = (randn_tensor(lowercase , generator=lowercase , device=lowercase ),)
if include_encoder_hidden_states:
_snake_case = floats_tensor((batch_size, 32, 32) ).to(lowercase )
if include_skip_sample:
_snake_case = randn_tensor(((batch_size, 3) + sizes) , generator=lowercase , device=lowercase )
return dummy_input
def A ( self : Any ):
'''simple docstring'''
_snake_case = {
'in_channels': 32,
'out_channels': 32,
'temb_channels': 128,
}
if self.block_type == "up":
_snake_case = 32
if self.block_type == "mid":
init_dict.pop('out_channels' )
_snake_case = self.dummy_input
return init_dict, inputs_dict
def A ( self : Dict , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowercase )
unet_block.to(lowercase )
unet_block.eval()
with torch.no_grad():
_snake_case = unet_block(**lowercase )
if isinstance(lowercase , lowercase ):
_snake_case = output[0]
self.assertEqual(output.shape , self.output_shape )
_snake_case = output[0, -1, -3:, -3:]
_snake_case = torch.tensor(lowercase ).to(lowercase )
assert torch_all_close(output_slice.flatten() , lowercase , atol=5E-3 )
@unittest.skipIf(torch_device == 'mps' , 'Training is not supported in mps' )
def A ( self : Dict ):
'''simple docstring'''
_snake_case , _snake_case = self.prepare_init_args_and_inputs_for_common()
_snake_case = self.block_class(**lowercase )
model.to(lowercase )
model.train()
_snake_case = model(**lowercase )
if isinstance(lowercase , lowercase ):
_snake_case = output[0]
_snake_case = torch.device(lowercase )
_snake_case = randn_tensor(output.shape , device=lowercase )
_snake_case = torch.nn.functional.mse_loss(lowercase , lowercase )
loss.backward() | 282 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __a ( __UpperCamelCase ):
def __init__( self , lowerCAmelCase__ , lowerCAmelCase__ ) -> Optional[int]:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowercase__: Tuple = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ )
@torch.no_grad()
def __call__( self , lowerCAmelCase__ = 1 , lowerCAmelCase__ = None , lowerCAmelCase__ = 0.0 , lowerCAmelCase__ = 50 , lowerCAmelCase__ = None , lowerCAmelCase__ = "pil" , lowerCAmelCase__ = True , ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(self.unet.config.sample_size , lowerCAmelCase__ ):
lowercase__: Tuple = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
lowercase__: Union[str, Any] = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(lowerCAmelCase__ )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
lowercase__: Dict = randn_tensor(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
lowercase__: Optional[int] = self.unet(lowerCAmelCase__ , lowerCAmelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowercase__: Dict = self.scheduler.step(
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , eta=lowerCAmelCase__ , use_clipped_model_output=lowerCAmelCase__ , generator=lowerCAmelCase__ ).prev_sample
lowercase__: int = (image / 2 + 0.5).clamp(0 , 1 )
lowercase__: Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase__: List[str] = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCAmelCase__ )
| 196 |
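# --- Illustration (not part of the pipeline above) ----------------------------
# Sketch of the update `self.scheduler.step` performs for one timestep (DDIM,
# Song et al. 2021, Eq. 12). a_t and a_prev are the cumulative alpha products
# at the current and previous timestep; eta = 0 gives the deterministic DDIM
# sampler, eta = 1 recovers DDPM-like stochasticity.
import torch

def ddim_step_sketch(model_output, sample, a_t, a_prev, eta, generator=None):
    x0_pred = (sample - (1 - a_t).sqrt() * model_output) / a_t.sqrt()
    sigma = eta * ((1 - a_prev) / (1 - a_t)).sqrt() * (1 - a_t / a_prev).sqrt()
    direction = (1 - a_prev - sigma**2).sqrt() * model_output
    noise = sigma * torch.randn(sample.shape, generator=generator)
    return a_prev.sqrt() * x0_pred + direction + noise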
_lowerCamelCase : int = [4, 1, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowerCamelCase : str = [3, 7, 7, 4, 2, 6, 4, 1, 5, 3, 7, 5]
_lowerCamelCase : List[str] = {
0: '''Sunday''',
1: '''Monday''',
2: '''Tuesday''',
3: '''Wednesday''',
4: '''Thursday''',
5: '''Friday''',
6: '''Saturday''',
}
def a_ ( __lowercase : int , __lowercase : int , __lowercase : int ) -> str:
assert len(str(__lowercase ) ) > 2, "year should be in YYYY format"
assert 1 <= month <= 12, "month should be between 1 and 12"
assert 1 <= day <= 31, "day should be between 1 and 31"
# Doomsday algorithm:
_snake_case = year // 100
_snake_case = (5 * (century % 4) + 2) % 7
_snake_case = year % 100
_snake_case = centurian % 12
_snake_case = (
(centurian // 12) + centurian_m + (centurian_m // 4) + century_anchor
) % 7
_snake_case = (
DOOMSDAY_NOT_LEAP[month - 1]
if (year % 4 != 0) or (centurian == 0 and (year % 400) != 0)
else DOOMSDAY_LEAP[month - 1]
)
_snake_case = (dooms_day + day - day_anchor) % 7
return WEEK_DAY_NAMES[week_day]
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 0 |
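# --- Illustration -------------------------------------------------------------
# Worked example of the algorithm above for 24 October 2020 (a Saturday):
century = 2020 // 100                          # 20
century_anchor = (5 * (century % 4) + 2) % 7   # 2
centurian = 2020 % 100                         # 20
centurian_m = centurian % 12                   # 8
dooms_day = (centurian // 12 + centurian_m + centurian_m // 4 + century_anchor) % 7
assert dooms_day == 6                          # 2020's doomsday falls on a Saturday
day_anchor = 3                                 # DOOMSDAY_LEAP[10 - 1]; 2020 is a leap year
assert (dooms_day + 24 - day_anchor) % 7 == 6  # WEEK_DAY_NAMES[6] == "Saturday"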
'''simple docstring'''
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__SCREAMING_SNAKE_CASE : Any = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE : int = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
__SCREAMING_SNAKE_CASE : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class lowerCamelCase_ :
'''simple docstring'''
__UpperCamelCase: Optional[str] = field(
default=snake_case__ , metadata={
"help": (
"The model checkpoint for weights initialization. Leave None if you want to train a model from"
" scratch."
)
} , )
__UpperCamelCase: Optional[str] = field(
default=snake_case__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(snake_case__ )} , )
__UpperCamelCase: Optional[str] = field(
default=snake_case__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
__UpperCamelCase: Optional[str] = field(
default=snake_case__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
__UpperCamelCase: Optional[str] = field(
default=snake_case__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
@dataclass
class lowerCamelCase_ :
'''simple docstring'''
__UpperCamelCase: Optional[str] = field(
default=snake_case__ , metadata={"help": "The input training data file (a text file)."} )
__UpperCamelCase: Optional[str] = field(
default=snake_case__ , metadata={
"help": (
"The input training data files (multiple files in glob format). "
"Very often splitting large files to smaller files can prevent tokenizer going out of memory"
)
} , )
__UpperCamelCase: Optional[str] = field(
default=snake_case__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
__UpperCamelCase: Optional[str] = field(
default=snake_case__ , metadata={"help": "An optional input train ref data file for whole word mask in Chinese."} , )
__UpperCamelCase: Optional[str] = field(
default=snake_case__ , metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."} , )
__UpperCamelCase: bool = field(
default=snake_case__ , metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."} , )
__UpperCamelCase: bool = field(
default=snake_case__ , metadata={"help": "Train with masked-language modeling loss instead of language modeling."} )
__UpperCamelCase: bool = field(default=snake_case__ , metadata={"help": "Whether ot not to use whole word mask."} )
__UpperCamelCase: float = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
__UpperCamelCase: float = field(
default=1 / 6 , metadata={
"help": (
"Ratio of length of a span of masked tokens to surrounding context length for permutation language"
" modeling."
)
} , )
__UpperCamelCase: int = field(
default=5 , metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."} )
__UpperCamelCase: int = field(
default=-1 , metadata={
"help": (
"Optional input sequence length after tokenization."
"The training dataset will be truncated in block of this size for training."
"Default to the model max input length for single sentence inputs (take into account special tokens)."
)
} , )
__UpperCamelCase: bool = field(
default=snake_case__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def UpperCamelCase_ ( _UpperCAmelCase : DataTrainingArguments , _UpperCAmelCase : PreTrainedTokenizer , _UpperCAmelCase : bool = False , _UpperCAmelCase : Optional[str] = None , ) -> str:
"""simple docstring"""
def _dataset(_UpperCAmelCase : str , _UpperCAmelCase : Dict=None ):
if args.line_by_line:
if ref_path is not None:
if not args.whole_word_mask or not args.mlm:
raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask" )
return LineByLineWithRefDataset(
tokenizer=__lowercase , file_path=__lowercase , block_size=args.block_size , ref_path=__lowercase , )
return LineByLineTextDataset(tokenizer=__lowercase , file_path=__lowercase , block_size=args.block_size )
else:
return TextDataset(
tokenizer=__lowercase , file_path=__lowercase , block_size=args.block_size , overwrite_cache=args.overwrite_cache , cache_dir=__lowercase , )
if evaluate:
return _dataset(args.eval_data_file , args.eval_ref_file )
elif args.train_data_files:
return ConcatDataset([_dataset(__lowercase ) for f in glob(args.train_data_files )] )
else:
return _dataset(args.train_data_file , args.train_ref_file )
def UpperCamelCase_ ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Dict = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , __lowercase )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
_UpperCAmelCase : int = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_UpperCAmelCase : Any = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
_UpperCAmelCase : int = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.tokenizer_name:
_UpperCAmelCase : Tuple = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
_UpperCAmelCase : str = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
if model_args.model_name_or_path:
_UpperCAmelCase : List[Any] = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__lowercase , cache_dir=model_args.cache_dir , )
else:
logger.info("Training new model from scratch" )
_UpperCAmelCase : Any = AutoModelWithLMHead.from_config(__lowercase )
model.resize_token_embeddings(len(__lowercase ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
_UpperCAmelCase : int = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
_UpperCAmelCase : List[Any] = min(data_args.block_size , tokenizer.max_len )
# Get datasets
_UpperCAmelCase : Any = (
get_dataset(__lowercase , tokenizer=__lowercase , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
_UpperCAmelCase : Optional[Any] = (
get_dataset(__lowercase , tokenizer=__lowercase , evaluate=__lowercase , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
if config.model_type == "xlnet":
_UpperCAmelCase : Dict = DataCollatorForPermutationLanguageModeling(
tokenizer=__lowercase , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
_UpperCAmelCase : Tuple = DataCollatorForWholeWordMask(
tokenizer=__lowercase , mlm_probability=data_args.mlm_probability )
else:
_UpperCAmelCase : Tuple = DataCollatorForLanguageModeling(
tokenizer=__lowercase , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase : int = Trainer(
model=__lowercase , args=__lowercase , data_collator=__lowercase , train_dataset=__lowercase , eval_dataset=__lowercase , prediction_loss_only=__lowercase , )
# Training
if training_args.do_train:
_UpperCAmelCase : Union[str, Any] = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=__lowercase )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_UpperCAmelCase : List[str] = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_UpperCAmelCase : Dict = trainer.evaluate()
_UpperCAmelCase : Union[str, Any] = math.exp(eval_output["eval_loss"] )
_UpperCAmelCase : int = {"perplexity": perplexity}
_UpperCAmelCase : List[Any] = os.path.join(training_args.output_dir , "eval_results_lm.txt" )
if trainer.is_world_master():
with open(__lowercase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , __lowercase , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
results.update(__lowercase )
return results
def UpperCamelCase_ ( _UpperCAmelCase : List[Any] ) -> Optional[int]:
"""simple docstring"""
main()
if __name__ == "__main__":
main()
| 31 |
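# --- Illustration (not part of the script above) ------------------------------
# Sketch of the masking DataCollatorForLanguageModeling applies with
# mlm_probability = 0.15 (BERT's 80/10/10 rule). The real collator additionally
# excludes special tokens from masking, omitted here for brevity.
import torch

def mlm_mask_sketch(input_ids, mask_token_id, vocab_size, mlm_probability=0.15):
    input_ids = input_ids.clone()
    labels = input_ids.clone()
    masked = torch.bernoulli(torch.full(labels.shape, mlm_probability)).bool()
    labels[~masked] = -100                                   # loss only on masked positions
    replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked
    input_ids[replaced] = mask_token_id                      # 80%: [MASK]
    random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked & ~replaced
    input_ids[random] = torch.randint(vocab_size, labels.shape)[random]  # 10%: random token
    return input_ids, labels                                 # remaining 10%: left unchanged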
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
_lowerCamelCase : int = False
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
def A ( self : Union[str, Any] , lowercase : Optional[int]=32 ):
'''simple docstring'''
set_seed(0 )
_snake_case = UNetaDModel(sample_size=lowercase , in_channels=3 , out_channels=3 )
_snake_case = torch.optim.SGD(model.parameters() , lr=0.0001 )
return model, optimizer
@slow
def A ( self : List[str] ):
'''simple docstring'''
_snake_case = 'cpu' # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
_snake_case = DDPMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowercase , )
_snake_case = DDIMScheduler(
num_train_timesteps=1_000 , beta_start=0.0001 , beta_end=0.02 , beta_schedule='linear' , clip_sample=lowercase , )
assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps
# shared batches for DDPM and DDIM
set_seed(0 )
_snake_case = [torch.randn((4, 3, 32, 32) ).clip(-1 , 1 ).to(lowercase ) for _ in range(4 )]
_snake_case = [torch.randn((4, 3, 32, 32) ).to(lowercase ) for _ in range(4 )]
_snake_case = [torch.randint(0 , 1_000 , (4,) ).long().to(lowercase ) for _ in range(4 )]
# train with a DDPM scheduler
_snake_case , _snake_case = self.get_model_optimizer(resolution=32 )
model.train().to(lowercase )
for i in range(4 ):
optimizer.zero_grad()
_snake_case = ddpm_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_snake_case = model(lowercase , timesteps[i] ).sample
_snake_case = torch.nn.functional.mse_loss(lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
# recreate the model and optimizer, and retry with DDIM
_snake_case , _snake_case = self.get_model_optimizer(resolution=32 )
model.train().to(lowercase )
for i in range(4 ):
optimizer.zero_grad()
_snake_case = ddim_scheduler.add_noise(clean_images[i] , noise[i] , timesteps[i] )
_snake_case = model(lowercase , timesteps[i] ).sample
_snake_case = torch.nn.functional.mse_loss(lowercase , noise[i] )
loss.backward()
optimizer.step()
del model, optimizer
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) )
self.assertTrue(torch.allclose(lowercase , lowercase , atol=1E-5 ) ) | 282 | 0 |
import comet # From: unbabel-comet
import torch
import datasets
_lowerCAmelCase : Union[str, Any] = datasets.logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = '''\
@inproceedings{rei-EtAl:2020:WMT,
author = {Rei, Ricardo and Stewart, Craig and Farinha, Ana C and Lavie, Alon},
title = {Unbabel\'s Participation in the WMT20 Metrics Shared Task},
booktitle = {Proceedings of the Fifth Conference on Machine Translation},
month = {November},
year = {2020},
address = {Online},
publisher = {Association for Computational Linguistics},
pages = {909--918},
}
@inproceedings{rei-etal-2020-comet,
title = "{COMET}: A Neural Framework for {MT} Evaluation",
author = "Rei, Ricardo and
Stewart, Craig and
Farinha, Ana C and
Lavie, Alon",
booktitle = "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
month = nov,
year = "2020",
address = "Online",
publisher = "Association for Computational Linguistics",
url = "https://www.aclweb.org/anthology/2020.emnlp-main.213",
pages = "2685--2702",
}
'''
_lowerCAmelCase : Tuple = '''\
Crosslingual Optimized Metric for Evaluation of Translation (COMET) is an open-source framework used to train Machine Translation metrics that achieve high levels of correlation with different types of human judgments (HTER, DA\'s or MQM).
With the release of the framework the authors also released fully trained models that were used to compete in the WMT20 Metrics Shared Task, achieving SOTA in that year's competition.
See the [README.md] file at https://unbabel.github.io/COMET/html/models.html for more information.
'''
_lowerCAmelCase : Any = '''
COMET score.
Args:
`sources` (list of str): Source sentences
`predictions` (list of str): candidate translations
`references` (list of str): reference translations
`cuda` (bool): If set to True, runs COMET using GPU
`show_progress` (bool): Shows progress
`model`: COMET model to be used. Will default to `wmt-large-da-estimator-1719` if None.
Returns:
`samples`: List of dictionaries with `src`, `mt`, `ref` and `score`.
`scores`: List of scores.
Examples:
>>> comet_metric = datasets.load_metric(\'comet\')
>>> # comet_metric = load_metric(\'comet\', \'wmt20-comet-da\') # you can also choose which model to use
>>> source = ["Dem Feuer konnte Einhalt geboten werden", "Schulen und Kindergärten wurden eröffnet."]
>>> hypothesis = ["The fire could be stopped", "Schools and kindergartens were open"]
>>> reference = ["They were able to control the fire.", "Schools and kindergartens opened"]
>>> results = comet_metric.compute(predictions=hypothesis, references=reference, sources=source)
>>> print([round(v, 2) for v in results["scores"]])
[0.19, 0.92]
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __magic_name__ ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE ( self :Optional[int] ):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://unbabel.github.io/COMET/html/index.html" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"sources": datasets.Value("string" , id="sequence" ),
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/Unbabel/COMET"] , reference_urls=[
"https://github.com/Unbabel/COMET",
"https://www.aclweb.org/anthology/2020.emnlp-main.213/",
"http://www.statmt.org/wmt20/pdf/2020.wmt-1.101.pdf6",
] , )
def SCREAMING_SNAKE_CASE ( self :Optional[int] , snake_case :List[str] ):
'''simple docstring'''
if self.config_name == "default":
A_ : Optional[Any] = comet.load_from_checkpoint(comet.download_model("wmt20-comet-da" ) )
else:
A_ : Any = comet.load_from_checkpoint(comet.download_model(self.config_name ) )
def SCREAMING_SNAKE_CASE ( self :Any , snake_case :Tuple , snake_case :Union[str, Any] , snake_case :Optional[Any] , snake_case :str=None , snake_case :str=False ):
'''simple docstring'''
if gpus is None:
A_ : Tuple = 1 if torch.cuda.is_available() else 0
A_ : Tuple = {"src": sources, "mt": predictions, "ref": references}
A_ : Union[str, Any] = [dict(zip(snake_case , snake_case ) ) for t in zip(*data.values() )]
A_ , A_ : List[Any] = self.scorer.predict(snake_case , gpus=snake_case , progress_bar=snake_case )
return {"mean_score": mean_score, "scores": scores}
| 300 |
import numpy as np
def a_ ( __lowercase : np.array ) -> np.array:
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod() | 282 | 0 |
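# --- Illustration -------------------------------------------------------------
# The one-liner above is correct, but np.exp(-vector) overflows (with a runtime
# warning and an intermediate inf) for large negative inputs. A numerically
# stable variant splits on the sign, as scipy.special.expit does:
import numpy as np

def stable_sigmoid(vector: np.ndarray) -> np.ndarray:
    out = np.empty_like(vector, dtype=np.float64)
    pos = vector >= 0
    out[pos] = 1.0 / (1.0 + np.exp(-vector[pos]))
    exp_v = np.exp(vector[~pos])
    out[~pos] = exp_v / (1.0 + exp_v)
    return out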
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self , lowercase=2 , lowercase=3 , lowercase=64 , lowercase=None ):
_lowerCamelCase : Optional[int] = np.random.default_rng(lowercase )
_lowerCamelCase : Dict = length
_lowerCamelCase : Dict = rng.normal(size=(length,) ).astype(np.floataa )
_lowerCamelCase : Optional[int] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self ):
return self.length
def __getitem__( self , lowercase ):
return {"x": self.x[i], "y": self.y[i]}
class lowerCAmelCase__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , lowercase=0 , lowercase=0 , lowercase=False ):
super().__init__()
_lowerCamelCase : Union[str, Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : Any = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
_lowerCamelCase : Dict = True
def A_ ( self , lowercase=None ):
if self.first_batch:
print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
_lowerCamelCase : int = False
return x * self.a[0] + self.b[0]
class lowerCAmelCase__ ( torch.nn.Module ):
'''simple docstring'''
def __init__( self , lowercase=0 , lowercase=0 , lowercase=False ):
super().__init__()
_lowerCamelCase : Dict = torch.nn.Parameter(torch.tensor(lowercase ).float() )
_lowerCamelCase : List[Any] = torch.nn.Parameter(torch.tensor(lowercase ).float() )
_lowerCamelCase : List[Any] = True
def A_ ( self , lowercase=None ):
if self.first_batch:
print(F'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
_lowerCamelCase : Union[str, Any] = False
return x * self.a + self.b
def _snake_case ( lowercase__ , lowercase__ = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
_lowerCamelCase : str = AutoTokenizer.from_pretrained('bert-base-cased' )
_lowerCamelCase : Optional[int] = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
_lowerCamelCase : str = load_dataset('csv' , data_files=__lowercase )
_lowerCamelCase : Union[str, Any] = datasets['train'].unique('label' )
_lowerCamelCase : List[str] = {v: i for i, v in enumerate(__lowercase )}
def tokenize_function(lowercase__ ):
# max_length=None => use the model max length (it's actually the default)
_lowerCamelCase : int = tokenizer(
examples['sentence1'] , examples['sentence2'] , truncation=__lowercase , max_length=__lowercase , padding='max_length' )
if "label" in examples:
_lowerCamelCase : str = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_lowerCamelCase : List[Any] = datasets.map(
__lowercase , batched=__lowercase , remove_columns=['sentence1', 'sentence2', 'label'] , )
def collate_fn(lowercase__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowercase , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(__lowercase , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
_lowerCamelCase : str = DataLoader(tokenized_datasets['train'] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=2 )
_lowerCamelCase : Any = DataLoader(tokenized_datasets['validation'] , shuffle=__lowercase , collate_fn=__lowercase , batch_size=1 )
return train_dataloader, eval_dataloader | 96 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def A ( self : int ):
'''simple docstring'''
_snake_case = FlaxXLMRobertaModel.from_pretrained('xlm-roberta-base' )
_snake_case = AutoTokenizer.from_pretrained('xlm-roberta-base' )
_snake_case = 'The dog is cute and lives in the garden house'
_snake_case = jnp.array([tokenizer.encode(lowercase )] )
_snake_case = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
_snake_case = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
_snake_case = model(lowercase )['last_hidden_state']
self.assertEqual(output.shape , lowercase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , lowercase , atol=1E-3 ) ) | 282 | 0 |
"""simple docstring"""
from sklearn.metrics import mean_squared_error
import datasets
_lowercase = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_lowercase = '''\
Mean Squared Error (MSE) is the average of the squared difference between the predicted
and actual values.
'''
_lowercase = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Dict ,A_ : Dict ,A_ : List[str]=None ,A_ : Tuple="uniform_average" ,A_ : Any=True ) -> List[str]:
A = mean_squared_error(
A_ ,A_ ,sample_weight=A_ ,multioutput=A_ ,squared=A_ )
return {"mse": mse} | 74 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowerCamelCase : int = None
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Tuple = '''▁'''
_lowerCamelCase : Optional[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_lowerCamelCase : Any = {
'''vocab_file''': {'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model'''},
'''tokenizer_file''': {
'''google/pegasus-xsum''': '''https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json'''
},
}
_lowerCamelCase : Optional[int] = {
'''google/pegasus-xsum''': 512,
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : int = VOCAB_FILES_NAMES
_UpperCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase : Any = PegasusTokenizer
_UpperCAmelCase : Dict = ["input_ids", "attention_mask"]
def __init__( self : Tuple , lowercase : str=None , lowercase : Any=None , lowercase : List[Any]="<pad>" , lowercase : List[Any]="</s>" , lowercase : Tuple="<unk>" , lowercase : Any="<mask_2>" , lowercase : List[str]="<mask_1>" , lowercase : List[Any]=None , lowercase : Dict=103 , **lowercase : Optional[Any] , ):
'''simple docstring'''
_snake_case = offset
if additional_special_tokens is not None:
if not isinstance(lowercase , lowercase ):
raise TypeError(
f'''additional_special_tokens should be of type {type(lowercase )}, but is'''
f''' {type(lowercase )}''' )
_snake_case = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f'''<unk_{i}>''' for i in range(len(lowercase ) , self.offset - 1 )
]
if len(set(lowercase ) ) != len(lowercase ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f''' shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.''' )
_snake_case = additional_special_tokens_extended
else:
_snake_case = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f'''<unk_{i}>''' for i in range(2 , self.offset )]
super().__init__(
lowercase , tokenizer_file=lowercase , pad_token=lowercase , eos_token=lowercase , unk_token=lowercase , mask_token=lowercase , mask_token_sent=lowercase , offset=lowercase , additional_special_tokens=lowercase , **lowercase , )
_snake_case = vocab_file
_snake_case = False if not self.vocab_file else True
def A ( self : List[str] , lowercase : Optional[int] ):
'''simple docstring'''
_snake_case = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
f''' {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}''' )
return [1 if x in all_special_ids else 0 for x in seq]
def A ( self : List[Any] , lowercase : List , lowercase : Optional[List] = None , lowercase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return self._special_token_mask(lowercase )
elif token_ids_a is None:
return self._special_token_mask(lowercase ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def A ( self : Any , lowercase : Tuple , lowercase : Any=None ):
'''simple docstring'''
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def A ( self : int , lowercase : str , lowercase : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(lowercase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
_snake_case = os.path.join(
lowercase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(lowercase ):
copyfile(self.vocab_file , lowercase )
return (out_vocab_file,) | 282 | 0 |
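# --- Illustration (not part of the tokenizer above) ---------------------------
# Minimal mirror of the special-tokens-mask logic above: 1 marks special ids,
# and when the sequence does not yet carry special tokens a single trailing 1
# accounts for the </s> that build_inputs_with_special_tokens appends.
def special_tokens_mask_sketch(token_ids, special_ids, already_has_special_tokens):
    mask = [1 if t in special_ids else 0 for t in token_ids]
    return mask if already_has_special_tokens else mask + [1]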
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {'''vocab_file''': '''vocab.json'''}
_snake_case = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
_snake_case = {'''mgp-str''': 27}
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : List[Any] = VOCAB_FILES_NAMES
__A : List[Any] = PRETRAINED_VOCAB_FILES_MAP
__A : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self , __A , __A="[GO]" , __A="[GO]" , __A="[s]" , __A="[GO]" , **__A ):
"""simple docstring"""
super().__init__(
unk_token=__A , bos_token=__A , eos_token=__A , pad_token=__A , **__A , )
with open(__A , encoding="utf-8" ) as vocab_handle:
lowerCamelCase : int = json.load(__A )
lowerCamelCase : Optional[Any] = {v: k for k, v in self.vocab.items()}
@property
def _snake_case ( self ):
"""simple docstring"""
return len(self.vocab )
def _snake_case ( self ):
"""simple docstring"""
return dict(self.vocab , **self.added_tokens_encoder )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Tuple = []
for s in text:
char_tokens.extend(__A )
return char_tokens
def _snake_case ( self , __A ):
"""simple docstring"""
return self.vocab.get(__A , self.vocab.get(self.unk_token ) )
def _snake_case ( self , __A ):
"""simple docstring"""
return self.decoder.get(__A )
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
if not os.path.isdir(__A ):
logger.error("Vocabulary path ({}) should be a directory".format(__A ) )
return
lowerCamelCase : List[Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" )
return (vocab_file,)
| 283 |
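# --- Illustration (not part of the tokenizer above) ---------------------------
# The tokenizer above is character-level: _tokenize splits text into single
# characters, and unknown characters fall back to the "[GO]" unk token. A toy
# vocab (hypothetical, not the real 27-entry MGP-STR vocab) makes this concrete:
vocab = {"[GO]": 0, "[s]": 1, "h": 2, "i": 3}
tokens = list("hi!")                                 # ["h", "i", "!"]
ids = [vocab.get(t, vocab["[GO]"]) for t in tokens]  # "!" is unknown -> 0
assert ids == [2, 3, 0]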
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 283 | 1 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : Tuple = StableUnCLIPPipeline
__A : Optional[int] = TEXT_TO_IMAGE_PARAMS
__A : str = TEXT_TO_IMAGE_BATCH_PARAMS
__A : int = TEXT_TO_IMAGE_IMAGE_PARAMS
__A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__A : Union[str, Any] = False
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = 32
lowerCamelCase : Dict = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__A , projection_dim=__A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase : List[Any] = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__A , num_layers=1 , )
torch.manual_seed(0 )
lowerCamelCase : Dict = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=__A , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = StableUnCLIPImageNormalizer(embedding_dim=__A )
lowerCamelCase : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowerCamelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowerCamelCase : str = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase : Any = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__A , layers_per_block=1 , upcast_attention=__A , use_linear_projection=__A , )
torch.manual_seed(0 )
lowerCamelCase : int = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=__A , steps_offset=1 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = AutoencoderKL()
lowerCamelCase : Optional[int] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def _snake_case ( self , __A , __A=0 ):
"""simple docstring"""
if str(__A ).startswith("mps" ):
lowerCamelCase : Optional[int] = torch.manual_seed(__A )
else:
lowerCamelCase : Optional[Any] = torch.Generator(device=__A ).manual_seed(__A )
lowerCamelCase : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=__A )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
lowerCamelCase : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase : Dict = pipe("anime turtle" , generator=__A , output_type="np" )
lowerCamelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__A , __A )
def _snake_case ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
lowerCamelCase : Union[str, Any] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase : Any = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
lowerCamelCase : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 283 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : str = "decision_transformer"
__A : Union[str, Any] = ["past_key_values"]
__A : Optional[int] = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , __A=17 , __A=4 , __A=128 , __A=4096 , __A=True , __A=1 , __A=1024 , __A=3 , __A=1 , __A=None , __A="relu" , __A=0.1 , __A=0.1 , __A=0.1 , __A=1e-5 , __A=0.02 , __A=True , __A=True , __A=5_0256 , __A=5_0256 , __A=False , __A=False , **__A , ):
"""simple docstring"""
lowerCamelCase : List[str] = state_dim
lowerCamelCase : Tuple = act_dim
lowerCamelCase : List[str] = hidden_size
lowerCamelCase : Optional[Any] = max_ep_len
lowerCamelCase : Union[str, Any] = action_tanh
lowerCamelCase : int = vocab_size
lowerCamelCase : List[Any] = n_positions
lowerCamelCase : Dict = n_layer
lowerCamelCase : int = n_head
lowerCamelCase : List[Any] = n_inner
lowerCamelCase : Any = activation_function
lowerCamelCase : Optional[int] = resid_pdrop
lowerCamelCase : str = embd_pdrop
lowerCamelCase : Tuple = attn_pdrop
lowerCamelCase : List[Any] = layer_norm_epsilon
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Optional[int] = scale_attn_weights
lowerCamelCase : List[Any] = use_cache
lowerCamelCase : Tuple = scale_attn_by_inverse_layer_idx
lowerCamelCase : Optional[int] = reorder_and_upcast_attn
lowerCamelCase : Dict = bos_token_id
lowerCamelCase : Any = eos_token_id
super().__init__(bos_token_id=__A , eos_token_id=__A , **__A )
| 283 | 1 |
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : Optional[int] = []
lowerCamelCase : List[str] = 1
while len(SCREAMING_SNAKE_CASE_ ) < 1E6:
constant.append(str(SCREAMING_SNAKE_CASE_ ) )
i += 1
lowerCamelCase : Union[str, Any] = "".join(SCREAMING_SNAKE_CASE_ )
return (
int(constant[0] )
* int(constant[9] )
* int(constant[99] )
* int(constant[999] )
* int(constant[9999] )
* int(constant[99999] )
* int(constant[999999] )
)
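# Hedged illustration (added; `_champernowne_prefix_check` is a hypothetical
# helper, not part of the original solution). The concatenated constant begins
# "123456789101112...", so the 1st and 10th digits are both 1 and the partial
# product over the first two sampled positions is 1.
def _champernowne_prefix_check():
    prefix = "".join(str(k) for k in range(1, 16))  # "123456789101112131415"
    assert prefix[0] == "1" and prefix[9] == "1"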
if __name__ == "__main__":
print(solution())
| 283 |
def lowercase_( SCREAMING_SNAKE_CASE_ = 4000000 ):
'''simple docstring'''
lowerCamelCase : Any = [0, 1]
lowerCamelCase : Union[str, Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase : Union[str, Any] = 0
for j in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
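# Minimal alternative sketch (hedged; `even_fib_sum` is a hypothetical name,
# assuming the same problem statement: sum the even Fibonacci terms that do
# not exceed n). It avoids materializing the full list:
def even_fib_sum(n: int) -> int:
    a, b, total = 0, 1, 0
    while a <= n:
        if a % 2 == 0:
            total += a
        a, b = b, a + b
    return total  # even_fib_sum(4_000_000) == 4_613_732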
if __name__ == "__main__":
print(f'''{solution() = }''')
| 283 | 1 |
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
# expert layer
lowerCamelCase : Union[str, Any] = flax_key_tuple[:-1] + ("weight",)
lowerCamelCase : int = torch.permute(SCREAMING_SNAKE_CASE_ , (0, 2, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(SCREAMING_SNAKE_CASE_ ):
# linear layer
lowerCamelCase : str = flax_key_tuple[:-1] + ("weight",)
lowerCamelCase : Dict = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
lowerCamelCase : str = flax_key_tuple[:-1] + ("weight",)
return flax_key_tuple, flax_tensor
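# Illustration of the renaming above (hedged; the key tuples are made up to
# show the pattern, not taken from a real checkpoint):
#   a 2-D ("...", "mlp", "kernel") entry becomes ("...", "mlp", "weight")
#   with the tensor transposed, while a 3-D "kernel" (an expert layer) keeps
#   its expert dimension and is permuted with (0, 2, 1) instead.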
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if "metadata" in layer:
lowerCamelCase : Optional[int] = layer.split("metadata" )
lowerCamelCase : Tuple = "".join(split_layer[0] )[:-1]
lowerCamelCase : Tuple = [tuple(("metadata" + split_layer[1]).split("/" ) )]
elif "kvstore" in layer:
lowerCamelCase : int = layer.split("kvstore" )
lowerCamelCase : Dict = "".join(split_layer[0] )[:-1]
lowerCamelCase : Optional[Any] = [tuple(("kvstore" + split_layer[1]).split("/" ) )]
else:
lowerCamelCase : Optional[Any] = layer.split("/" )
lowerCamelCase : List[Any] = "/".join(split_layer[:-1] )
lowerCamelCase : Optional[Any] = (split_layer[-1],)
if "kvstore/path" in layer:
lowerCamelCase : Any = f"""{switch_checkpoint_path}/{checkpoint_info[layer]}"""
elif "kvstore/driver" in layer:
lowerCamelCase : int = "file"
else:
lowerCamelCase : List[str] = checkpoint_info[layer]
return curr_real_layer_name, split_layer, content
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = rename_keys(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : int = {}
for k, v in current_block.items():
lowerCamelCase : Optional[Any] = v
lowerCamelCase : Optional[Any] = new_current_block
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = WEIGHTS_NAME ):
'''simple docstring'''
lowerCamelCase : Dict = convert_file_size_to_int(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Tuple = []
lowerCamelCase : Optional[int] = {}
lowerCamelCase : Optional[Any] = 0
lowerCamelCase : Any = 0
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
with gfile.GFile(switch_checkpoint_path + "/checkpoint" , "rb" ) as fp:
lowerCamelCase : Dict = serialization.msgpack_restore(fp.read() )["optimizer"]["target"]
lowerCamelCase : Optional[Any] = flatten_dict(SCREAMING_SNAKE_CASE_ , sep="/" )
lowerCamelCase : Any = {}
for layer in checkpoint_info.keys():
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = get_key_and_tensorstore_dict(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if curr_real_layer_name in all_layers:
lowerCamelCase : List[Any] = content
else:
lowerCamelCase : Dict = {split_layer[-1]: content}
for key in all_layers.keys():
# open tensorstore file
lowerCamelCase : List[Any] = ts.open(unflatten_dict(all_layers[key] ) ).result().read().result()
lowerCamelCase : Optional[int] = torch.tensor(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Any = raw_weights.numel() * dtype_byte_size(raw_weights.dtype )
# use the renaming pattern from the small conversion scripts
lowerCamelCase , lowerCamelCase : List[Any] = rename_base_flax_keys(tuple(key.split("/" ) ) , SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Optional[int] = "/".join(SCREAMING_SNAKE_CASE_ )
# If this weight is going to tip over the maximal shard size, we split.
if current_block_size + weight_size > max_shard_size:
lowerCamelCase : str = os.path.join(
SCREAMING_SNAKE_CASE_ , weights_name.replace(".bin" , f"""-{len(SCREAMING_SNAKE_CASE_ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
sharded_state_dicts.append(current_block.keys() )
del current_block
lowerCamelCase : str = {}
lowerCamelCase : List[str] = 0
lowerCamelCase : Optional[Any] = raw_weights.to(getattr(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
current_block_size += weight_size
total_size += weight_size
# Add the last block
lowerCamelCase : str = os.path.join(SCREAMING_SNAKE_CASE_ , weights_name.replace(".bin" , f"""-{len(SCREAMING_SNAKE_CASE_ )+1:05d}-of-???.bin""" ) )
rename_and_save_block(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
sharded_state_dicts.append(current_block.keys() )
# If we only have one shard, we return it
if len(SCREAMING_SNAKE_CASE_ ) == 1:
return {weights_name: sharded_state_dicts[0]}, None
# Otherwise, let's build the index
lowerCamelCase : List[Any] = {}
lowerCamelCase : str = {}
for idx, shard in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : List[Any] = weights_name.replace(
".bin" , f"""-{idx+1:05d}-of-{len(SCREAMING_SNAKE_CASE_ ):05d}.bin""" ) # len(sharded_state_dicts):05d}
lowerCamelCase : List[Any] = os.path.join(SCREAMING_SNAKE_CASE_ , weights_name.replace(".bin" , f"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase : List[Any] = shard
for key in shard:
lowerCamelCase : List[Any] = shard_file
# Add the metadata
lowerCamelCase : Tuple = {"total_size": total_size}
lowerCamelCase : Dict = {"metadata": metadata, "weight_map": weight_map}
with open(os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , "w" , encoding="utf-8" ) as f:
lowerCamelCase : Optional[Any] = json.dumps(SCREAMING_SNAKE_CASE_ , indent=2 , sort_keys=SCREAMING_SNAKE_CASE_ ) + "\n"
f.write(SCREAMING_SNAKE_CASE_ )
return metadata, index
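# Hedged demo of the size bookkeeping used above (`_demo_shard_threshold` is a
# hypothetical helper that reuses this file's imports; it assumes the
# documented behavior of `convert_file_size_to_int` and `dtype_byte_size`):
def _demo_shard_threshold():
    max_shard_size = convert_file_size_to_int("10GB")  # 10 * 10**9 bytes
    tensor = torch.zeros(1024, 1024)
    weight_size = tensor.numel() * dtype_byte_size(tensor.dtype)  # ~4 MiB
    return weight_size > max_shard_size  # False: this tensor fits in one shard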
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--max_shard_size''', default='''10GB''', required=False, help='''Max shard size''')
parser.add_argument('''--dtype''', default='''bfloat16''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
_snake_case = parser.parse_args()
shard_on_the_fly(
args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def lowercase_( ):
'''simple docstring'''
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, TaTokenizer
lowerCamelCase : List[Any] = SwitchTransformersConfig.from_pretrained("google/switch-base-8" )
config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted" )
lowerCamelCase : List[str] = SwitchTransformersForConditionalGeneration.from_pretrained(
"/home/arthur_huggingface_co/transformers/switch_converted" , device_map="auto" )
lowerCamelCase : Dict = TaTokenizer.from_pretrained("t5-small" )
lowerCamelCase : Any = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
lowerCamelCase : str = tokenizer(SCREAMING_SNAKE_CASE_ , return_tensors="pt" ).input_ids
lowerCamelCase : Optional[int] = model.generate(SCREAMING_SNAKE_CASE_ , decoder_start_token_id=0 )
print(tokenizer.decode(out[0] ) )
| 283 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_snake_case = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 283 | 1 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def lowercase_( SCREAMING_SNAKE_CASE_ = 3 ):
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError("number of qubits must be an integer." )
if number_of_qubits <= 0:
raise ValueError("number of qubits must be > 0." )
if math.floor(SCREAMING_SNAKE_CASE_ ) != number_of_qubits:
raise ValueError("number of qubits must be an exact integer." )
if number_of_qubits > 10:
raise ValueError("number of qubits too large to simulate (>10)." )
lowerCamelCase : List[str] = QuantumRegister(SCREAMING_SNAKE_CASE_ , "qr" )
lowerCamelCase : Optional[Any] = ClassicalRegister(SCREAMING_SNAKE_CASE_ , "cr" )
lowerCamelCase : List[Any] = QuantumCircuit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Optional[int] = number_of_qubits
for i in range(SCREAMING_SNAKE_CASE_ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(SCREAMING_SNAKE_CASE_ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(SCREAMING_SNAKE_CASE_ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# simulate with 10000 shots
lowerCamelCase : str = Aer.get_backend("qasm_simulator" )
lowerCamelCase : str = execute(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , shots=10000 )
return job.result().get_counts(SCREAMING_SNAKE_CASE_ )
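# Expected behavior (hedged note): the QFT maps the all-zeros register to an
# equal superposition, so with 10000 shots each of the 2**n basis states
# should appear roughly 10000 / 2**n times (about 1250 each for 3 qubits).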
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
| 283 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_snake_case = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
_snake_case = parser.parse_args()
_snake_case = '''cpu'''
_snake_case = '''a lovely <dicoo> in red dress and hat, in the snowy and bright night, with many brightly lit buildings'''
_snake_case = '''path-to-your-trained-model'''
_snake_case = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_snake_case = pipe.to(device)
# to channels last
_snake_case = pipe.unet.to(memory_format=torch.channels_last)
_snake_case = pipe.vae.to(memory_format=torch.channels_last)
_snake_case = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_snake_case = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_snake_case = torch.randn(2, 4, 64, 64)
_snake_case = torch.rand(1) * 9_99
_snake_case = torch.randn(2, 77, 7_68)
_snake_case = (sample, timestep, encoder_hidden_status)
try:
_snake_case = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_snake_case = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_snake_case = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_snake_case = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_snake_case = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_snake_case = 6_66
_snake_case = torch.Generator(device).manual_seed(seed)
_snake_case = {'''generator''': generator}
if args.steps is not None:
_snake_case = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_snake_case = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 283 | 1 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if "model" in orig_key:
lowerCamelCase : Dict = orig_key.replace("model." , "" )
if "norm1" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("norm1" , "attention.output.LayerNorm" )
if "norm2" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("norm2" , "output.LayerNorm" )
if "norm" in orig_key:
lowerCamelCase : Optional[Any] = orig_key.replace("norm" , "LayerNorm" )
if "transformer" in orig_key:
lowerCamelCase : int = orig_key.split("." )[0].split("_" )[-1]
lowerCamelCase : Dict = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
lowerCamelCase : List[str] = orig_key.replace("mha.attn" , "attention.self" )
if "mha" in orig_key:
lowerCamelCase : List[Any] = orig_key.replace("mha" , "attention" )
if "W_q" in orig_key:
lowerCamelCase : Optional[int] = orig_key.replace("W_q" , "self.query" )
if "W_k" in orig_key:
lowerCamelCase : List[Any] = orig_key.replace("W_k" , "self.key" )
if "W_v" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("W_v" , "self.value" )
if "ff1" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("ff1" , "intermediate.dense" )
if "ff2" in orig_key:
lowerCamelCase : Optional[int] = orig_key.replace("ff2" , "output.dense" )
if "ff" in orig_key:
lowerCamelCase : Optional[int] = orig_key.replace("ff" , "output.dense" )
if "mlm_class" in orig_key:
lowerCamelCase : Dict = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" )
if "mlm" in orig_key:
lowerCamelCase : List[Any] = orig_key.replace("mlm" , "cls.predictions.transform" )
if "cls" not in orig_key:
lowerCamelCase : int = "yoso." + orig_key
return orig_key
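# Illustrative mapping produced by the rules above (hedged; the key strings
# are invented to show the pattern):
#   "model.transformer_0.mha.W_q.weight"
#       -> "yoso.encoder.layer.0.attention.self.query.weight"
#   "mlm.mlm_class.weight" -> "cls.predictions.decoder.weight"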
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCamelCase : List[str] = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
lowerCamelCase : Dict = val
lowerCamelCase : Dict = orig_state_dict["cls.predictions.decoder.bias"]
lowerCamelCase : Dict = torch.arange(SCREAMING_SNAKE_CASE_ ).expand((1, -1) ) + 2
return orig_state_dict
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["model_state_dict"]
lowerCamelCase : List[str] = YosoConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Any = YosoForMaskedLM(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[Any] = convert_checkpoint_helper(config.max_position_embeddings , SCREAMING_SNAKE_CASE_ )
print(model.load_state_dict(SCREAMING_SNAKE_CASE_ ) )
model.eval()
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_snake_case = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 283 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __A , ):
"""simple docstring"""
lowerCamelCase : str = parent
lowerCamelCase : Union[str, Any] = 13
lowerCamelCase : Optional[Any] = 7
lowerCamelCase : List[str] = True
lowerCamelCase : Optional[int] = True
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : List[Any] = True
lowerCamelCase : Tuple = True
lowerCamelCase : Any = False
lowerCamelCase : int = False
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = 2
lowerCamelCase : Dict = 99
lowerCamelCase : Tuple = 0
lowerCamelCase : Any = 32
lowerCamelCase : List[Any] = 2
lowerCamelCase : Tuple = 4
lowerCamelCase : List[str] = 0.1
lowerCamelCase : int = 0.1
lowerCamelCase : int = 512
lowerCamelCase : List[Any] = 16
lowerCamelCase : Any = 2
lowerCamelCase : Any = 0.02
lowerCamelCase : List[str] = 3
lowerCamelCase : Tuple = 4
lowerCamelCase : int = "last"
lowerCamelCase : int = True
lowerCamelCase : Dict = None
lowerCamelCase : Tuple = 0
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
lowerCamelCase : Tuple = None
if self.use_input_lengths:
lowerCamelCase : Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase : str = None
if self.use_token_type_ids:
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCamelCase : Dict = None
lowerCamelCase : Dict = None
lowerCamelCase : Tuple = None
if self.use_labels:
lowerCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : int = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : List[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = TFFlaubertModel(config=__A )
lowerCamelCase : Any = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
lowerCamelCase : Dict = model(__A )
lowerCamelCase : Any = [input_ids, input_mask]
lowerCamelCase : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : int = TFFlaubertWithLMHeadModel(__A )
lowerCamelCase : List[str] = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
lowerCamelCase : int = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = TFFlaubertForQuestionAnsweringSimple(__A )
lowerCamelCase : Optional[int] = {"input_ids": input_ids, "lengths": input_lengths}
lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Optional[int] = TFFlaubertForSequenceClassification(__A )
lowerCamelCase : str = {"input_ids": input_ids, "lengths": input_lengths}
lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Tuple = self.num_labels
lowerCamelCase : Optional[Any] = TFFlaubertForTokenClassification(config=__A )
lowerCamelCase : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Any = self.num_choices
lowerCamelCase : Optional[Any] = TFFlaubertForMultipleChoice(config=__A )
lowerCamelCase : Tuple = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : int = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : List[str] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : Optional[int] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = config_and_inputs
lowerCamelCase : List[Any] = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"langs": token_type_ids,
"lengths": input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : str = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
__A : Dict = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__A : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
__A : List[str] = False
__A : List[str] = False
def _snake_case ( self , __A , __A , __A , __A , __A ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = TFFlaubertModelTester(self )
lowerCamelCase : Optional[int] = ConfigTester(self , config_class=__A , emb_dim=37 )
def _snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__A )
@slow
def _snake_case ( self ):
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = TFFlaubertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
lowerCamelCase : str = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
lowerCamelCase : Dict = model(__A )[0]
lowerCamelCase : List[str] = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __A )
# compare the actual values for a slice.
lowerCamelCase : Tuple = tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 283 | 1 |
import requests
_snake_case = '''''' # <-- Put your OpenWeatherMap appid here!
_snake_case = '''https://api.openweathermap.org/data/2.5/'''
def lowercase_( SCREAMING_SNAKE_CASE_ = "Chicago" , SCREAMING_SNAKE_CASE_ = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + "weather" , params=locals() ).json()
def lowercase_( SCREAMING_SNAKE_CASE_ = "Kolkata, India" , SCREAMING_SNAKE_CASE_ = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + "forecast" , params=locals() ).json()
def lowercase_( SCREAMING_SNAKE_CASE_ = 55.68 , SCREAMING_SNAKE_CASE_ = 12.57 , SCREAMING_SNAKE_CASE_ = APPID ):
'''simple docstring'''
return requests.get(URL_BASE + "onecall" , params=locals() ).json()
if __name__ == "__main__":
from pprint import pprint
while True:
_snake_case = input('''Enter a location:''').strip()
if location:
pprint(current_weather(location))
else:
break
| 283 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=[] ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = size[0] - overlap_pixels * 2
lowerCamelCase : int = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
lowerCamelCase : Tuple = np.ones((size_y, size_x) , dtype=np.uinta ) * 255
lowerCamelCase : List[Any] = np.pad(SCREAMING_SNAKE_CASE_ , mode="linear_ramp" , pad_width=SCREAMING_SNAKE_CASE_ , end_values=0 )
if "l" in remove_borders:
lowerCamelCase : Optional[Any] = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
lowerCamelCase : List[Any] = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
lowerCamelCase : List[Any] = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
lowerCamelCase : Tuple = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return max(SCREAMING_SNAKE_CASE_ , min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = list(SCREAMING_SNAKE_CASE_ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
lowerCamelCase : Any = clamp_rect(SCREAMING_SNAKE_CASE_ , [0, 0] , [image_size[0], image_size[1]] )
return rect
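# Worked example for the helper above (hedged illustration): with overlap=8,
# rect=(0, 0, 64, 64) and image_size=(128, 128), the expanded rectangle
# (-8, -8, 72, 72) is clamped back to (0, 0, 72, 72), so tiles only grow
# inward from the image borders.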
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Dict = Image.new("RGB" , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(SCREAMING_SNAKE_CASE_ , (original_slice, 0) )
return result
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Union[str, Any] = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
lowerCamelCase : int = tile.crop(SCREAMING_SNAKE_CASE_ )
return tile
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : int = n % d
return n - divisor
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
def __init__( self , __A , __A , __A , __A , __A , __A , __A = 350 , ):
"""simple docstring"""
super().__init__(
vae=__A , text_encoder=__A , tokenizer=__A , unet=__A , low_res_scheduler=__A , scheduler=__A , max_noise_level=__A , )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , **__A ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase : Tuple = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
lowerCamelCase : Union[str, Any] = add_overlap_rect(__A , __A , image.size )
lowerCamelCase : List[str] = image.crop(__A )
lowerCamelCase : Optional[int] = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
lowerCamelCase : int = translated_slice_x - (original_image_slice / 2)
lowerCamelCase : Optional[Any] = max(0 , __A )
lowerCamelCase : Tuple = squeeze_tile(__A , __A , __A , __A )
lowerCamelCase : Dict = to_input.size
lowerCamelCase : Optional[int] = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
lowerCamelCase : Dict = super(__A , self ).__call__(image=__A , **__A ).images[0]
lowerCamelCase : Tuple = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
lowerCamelCase : Optional[Any] = unsqueeze_tile(__A , __A )
lowerCamelCase : Optional[Any] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
lowerCamelCase : int = []
if x == 0:
remove_borders.append("l" )
elif crop_rect[2] == image.size[0]:
remove_borders.append("r" )
if y == 0:
remove_borders.append("t" )
elif crop_rect[3] == image.size[1]:
remove_borders.append("b" )
lowerCamelCase : int = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__A ) , mode="L" , )
final_image.paste(
__A , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __A )
@torch.no_grad()
def __call__( self , __A , __A , __A = 75 , __A = 9.0 , __A = 50 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = None , __A = 1 , __A = 128 , __A = 32 , __A = 32 , ):
"""simple docstring"""
lowerCamelCase : Dict = Image.new("RGB" , (image.size[0] * 4, image.size[1] * 4) )
lowerCamelCase : Union[str, Any] = math.ceil(image.size[0] / tile_size )
lowerCamelCase : Dict = math.ceil(image.size[1] / tile_size )
lowerCamelCase : str = tcx * tcy
lowerCamelCase : int = 0
for y in range(__A ):
for x in range(__A ):
self._process_tile(
__A , __A , __A , __A , __A , __A , __A , prompt=__A , num_inference_steps=__A , guidance_scale=__A , noise_level=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , latents=__A , )
current_count += 1
if callback is not None:
callback({"progress": current_count / total_tile_count, "image": final_image} )
return final_image
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : Dict = "stabilityai/stable-diffusion-x4-upscaler"
lowerCamelCase : Union[str, Any] = StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , revision="fp16" , torch_dtype=torch.floataa )
lowerCamelCase : Optional[Any] = pipe.to("cuda" )
lowerCamelCase : List[str] = Image.open("../../docs/source/imgs/diffusers_library.jpg" )
def callback(SCREAMING_SNAKE_CASE_ ):
print(f"""progress: {obj['progress']:.4f}""" )
obj["image"].save("diffusers_library_progress.jpg" )
lowerCamelCase : int = pipe(image=SCREAMING_SNAKE_CASE_ , prompt="Black font, white background, vector" , noise_level=40 , callback=SCREAMING_SNAKE_CASE_ )
final_image.save("diffusers_library.jpg" )
if __name__ == "__main__":
main()
| 283 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
lowerCamelCase : Tuple = "backbone." if is_semantic else ""
lowerCamelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", "beit.embeddings.cls_token"),
(f"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
(f"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
(f"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
lowerCamelCase : Optional[Any] = "backbone." if is_semantic else ""
# queries, keys and values
lowerCamelCase : Optional[Any] = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
lowerCamelCase : Optional[Any] = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
lowerCamelCase : Tuple = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
lowerCamelCase : str = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Any = q_bias
lowerCamelCase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : int = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowerCamelCase : Any = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
lowerCamelCase : Any = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
lowerCamelCase : int = gamma_a
lowerCamelCase : Optional[Any] = gamma_a
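# Shape sketch for the split above (hedged; H stands for config.hidden_size):
# the fused qkv projection has shape (3H, H) and is sliced row-wise into
# query = rows [0, H), key = rows [H, 2H), value = rows [2H, 3H).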
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = dct.pop(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[Any] = val
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : Optional[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
lowerCamelCase : List[Any] = False if "rvlcdip" in checkpoint_url else True
lowerCamelCase : str = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE_ , use_mask_token=SCREAMING_SNAKE_CASE_ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowerCamelCase : Union[str, Any] = 1024
lowerCamelCase : Any = 4096
lowerCamelCase : str = 24
lowerCamelCase : List[Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
lowerCamelCase : Optional[Any] = 16
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "rvlcdip-id2label.json"
lowerCamelCase : str = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
lowerCamelCase : Any = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : Dict = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowerCamelCase : int = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["model"]
lowerCamelCase : Tuple = create_rename_keys(SCREAMING_SNAKE_CASE_ , has_lm_head=SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , has_lm_head=SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
lowerCamelCase : List[Any] = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE_ ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image
lowerCamelCase : str = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Any = prepare_img()
lowerCamelCase : Optional[int] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" )
lowerCamelCase : Optional[Any] = encoding["pixel_values"]
lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Dict = outputs.logits
# verify logits
lowerCamelCase : List[Any] = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE_ ), "Shape of logits not as expected"
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
if has_lm_head:
lowerCamelCase : Optional[Any] = "dit-base" if "base" in checkpoint_url else "dit-large"
else:
lowerCamelCase : Dict = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
_snake_case = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 283 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Dict = "mobilenet_v2"
def __init__( self , __A=3 , __A=224 , __A=1.0 , __A=8 , __A=8 , __A=6 , __A=32 , __A=True , __A=True , __A="relu6" , __A=True , __A=0.8 , __A=0.02 , __A=0.001 , __A=255 , **__A , ):
"""simple docstring"""
super().__init__(**__A )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
lowerCamelCase : str = num_channels
lowerCamelCase : Any = image_size
lowerCamelCase : Union[str, Any] = depth_multiplier
lowerCamelCase : Tuple = depth_divisible_by
lowerCamelCase : Dict = min_depth
lowerCamelCase : Dict = expand_ratio
lowerCamelCase : Optional[Any] = output_stride
lowerCamelCase : int = first_layer_is_expansion
lowerCamelCase : Union[str, Any] = finegrained_output
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Optional[Any] = tf_padding
lowerCamelCase : Optional[Any] = classifier_dropout_prob
lowerCamelCase : Dict = initializer_range
lowerCamelCase : str = layer_norm_eps
lowerCamelCase : Optional[Any] = semantic_loss_ignore_index
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = version.parse("1.11" )
@property
def _snake_case ( self ):
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def _snake_case ( self ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def _snake_case ( self ):
"""simple docstring"""
return 1e-4
| 283 | 1 |
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
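# Worked example: 28 is perfect because its proper divisors 1, 2, 4, 7 and 14
# sum to 28, while 27 is not (1 + 3 + 9 = 13).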
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
_snake_case = int(input('''Enter number: ''').strip())
print(f'''{number} is {'' if perfect(number) else 'not '}a Perfect Number.''')
| 283 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
_snake_case = 2_99_79_24_58
# Symbols
_snake_case , _snake_case , _snake_case , _snake_case = symbols('''ct x y z''')
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if velocity > c:
raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError("Speed must be greater than or equal to 1!" )
return velocity / c
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return 1 / sqrt(1 - beta(SCREAMING_SNAKE_CASE_ ) ** 2 )
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return np.array(
[
[gamma(SCREAMING_SNAKE_CASE_ ), -gamma(SCREAMING_SNAKE_CASE_ ) * beta(SCREAMING_SNAKE_CASE_ ), 0, 0],
[-gamma(SCREAMING_SNAKE_CASE_ ) * beta(SCREAMING_SNAKE_CASE_ ), gamma(SCREAMING_SNAKE_CASE_ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
if event is None:
lowerCamelCase : Tuple = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(SCREAMING_SNAKE_CASE_ ) @ event
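# Quick numeric check (hedged, rounded values): for velocity = c / 2 we get
# beta = 0.5 and gamma = 1 / sqrt(1 - 0.25) ~= 1.1547, so the upper-left
# block of the matrix is [[1.1547, -0.5774], [-0.5774, 1.1547]].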
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
four_vector = transform(29_979_245)
print("Example of four vector: ")
print(f"ct' = {four_vector[0]}")
print(f"x' = {four_vector[1]}")
print(f"y' = {four_vector[2]}")
print(f"z' = {four_vector[3]}")

# Substitute symbols with numerical values
sub_dict = {ct: c, x: 1, y: 1, z: 1}
numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f"\n{numerical_vector}")
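# Worked numeric example (an addition to the original demo): boost a concrete
# event at half the speed of light; gamma(0.5c) is about 1.1547.
half_c = c / 2
boosted = transform(half_c, np.array([2.0, 1.0, 0.0, 0.0]))  # t = 2 s, x = 1 m
print(f"gamma at 0.5c: {gamma(half_c):.4f}")
print(f"boosted event: {boosted}")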
| 283 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''weiweishi/roc-bert-base-zh''': '''https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json''',
}
class RoCBertConfig(PretrainedConfig):
    model_type = "roc_bert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, use_cache=True, pad_token_id=0, position_embedding_type="absolute", classifier_dropout=None, enable_pronunciation=True, enable_shape=True, pronunciation_embed_dim=768, pronunciation_vocab_size=910, shape_embed_dim=512, shape_vocab_size=24858, concat_input=True, **kwargs):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.enable_pronunciation = enable_pronunciation
        self.enable_shape = enable_shape
        self.pronunciation_embed_dim = pronunciation_embed_dim
        self.pronunciation_vocab_size = pronunciation_vocab_size
        self.shape_embed_dim = shape_embed_dim
        self.shape_vocab_size = shape_vocab_size
        self.concat_input = concat_input
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        super().__init__(pad_token_id=pad_token_id, **kwargs)
| 283 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_snake_case = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
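# Migration sketch (an addition, not part of the original module): new code
# should construct the image processor directly instead of the deprecated
# feature extractor; the checkpoint id below is only illustrative.
#
#     from transformers import DPTImageProcessor
#     image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-large")
#     inputs = image_processor(images=image, return_tensors="pt")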
| 283 | 1 |
def solution(numerator: int = 3, denominator: int = 7, limit: int = 1_000_000) -> int:
    """Return the numerator of the largest fraction below numerator/denominator with denominator <= limit."""
    max_numerator = 0
    max_denominator = 1
    for current_denominator in range(1, limit + 1):
        current_numerator = current_denominator * numerator // denominator
        if current_denominator % denominator == 0:
            current_numerator -= 1
        if current_numerator * max_denominator > current_denominator * max_numerator:
            max_numerator = current_numerator
            max_denominator = current_denominator
    return max_numerator

if __name__ == "__main__":
    print(solution(numerator=3, denominator=7, limit=1_000_000))
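# Cross-check sketch (an addition): for a small search limit the answer can be
# verified by brute force with fractions.Fraction; 2/5 is the largest fraction
# below 3/7 with denominator at most 8.
from fractions import Fraction

def brute_force(numerator=3, denominator=7, limit=8):
    target = Fraction(numerator, denominator)
    candidates = (
        Fraction(n, d)
        for d in range(1, limit + 1)
        for n in range(1, d * numerator // denominator + 1)
    )
    return max(f for f in candidates if f < target).numerator

assert brute_force() == solution(numerator=3, denominator=7, limit=8) == 2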
| 283 |
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"

def update_custom_js(version: str):
    """Point the version switcher in custom.js at the new release."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
| 283 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Output of the projection model, mirroring the fields read by its callers."""

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None

class RobertaSeriesConfig(XLMRobertaConfig):
    def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2, project_dim=512, pooler_fn="cls", learn_encoder=False, use_attention_mask=True, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask

class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )
        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
        return TransformationModelOutput(
            projection_state=projection_state,
            last_hidden_state=outputs.last_hidden_state,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
| 283 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Any = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : str = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : List[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Any = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : int = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
| 283 | 1 |
import math
def sieve(n: int) -> list:
    """Segmented sieve: return a list of all primes up to n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime

print(sieve(10**6))
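# Consistency checks (an addition to the original script): the segmented sieve
# should agree with the well-known list of primes up to 30, and there are 168
# primes below one thousand.
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]
assert len(sieve(1_000)) == 168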
| 283 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
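# Usage sketch (an addition, not part of the original module). PipelineTool
# instances are callable, so transcription reduces to a single call; `waveform`
# stands in for any 16 kHz mono float array and is not defined here.
#
#     tool = SpeechToTextTool()
#     transcription = tool(audio=waveform)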
| 283 | 1 |
def solution(n: int = 10) -> str:
    """Return the last n digits of 28433 * 2**7830457 + 1 (Project Euler 97)."""
    if not isinstance(n, int) or n < 0:
        raise ValueError("Invalid input")
    modulus = 10**n
    number = 28433 * pow(2, 7830457, modulus) + 1
    return str(number % modulus)

if __name__ == "__main__":
    from doctest import testmod
    testmod()
    print(f"{solution(10) = }")
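# Cross-check (an addition): for a short suffix the modular shortcut must match
# direct evaluation; the shift computes the full power 2**7830457 exactly.
assert solution(5) == str((28433 * (1 << 7830457) + 1) % 10**5)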
| 283 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if "model" in orig_key:
lowerCamelCase : Dict = orig_key.replace("model." , "" )
if "norm1" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("norm1" , "attention.output.LayerNorm" )
if "norm2" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("norm2" , "output.LayerNorm" )
if "norm" in orig_key:
lowerCamelCase : Optional[Any] = orig_key.replace("norm" , "LayerNorm" )
if "transformer" in orig_key:
lowerCamelCase : int = orig_key.split("." )[0].split("_" )[-1]
lowerCamelCase : Dict = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
lowerCamelCase : List[str] = orig_key.replace("mha.attn" , "attention.self" )
if "mha" in orig_key:
lowerCamelCase : List[Any] = orig_key.replace("mha" , "attention" )
if "W_q" in orig_key:
lowerCamelCase : Optional[int] = orig_key.replace("W_q" , "self.query" )
if "W_k" in orig_key:
lowerCamelCase : List[Any] = orig_key.replace("W_k" , "self.key" )
if "W_v" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("W_v" , "self.value" )
if "ff1" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("ff1" , "intermediate.dense" )
if "ff2" in orig_key:
lowerCamelCase : Optional[int] = orig_key.replace("ff2" , "output.dense" )
if "ff" in orig_key:
lowerCamelCase : Optional[int] = orig_key.replace("ff" , "output.dense" )
if "mlm_class" in orig_key:
lowerCamelCase : Dict = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" )
if "mlm" in orig_key:
lowerCamelCase : List[Any] = orig_key.replace("mlm" , "cls.predictions.transform" )
if "cls" not in orig_key:
lowerCamelCase : int = "yoso." + orig_key
return orig_key
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCamelCase : List[str] = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
lowerCamelCase : Dict = val
lowerCamelCase : Dict = orig_state_dict["cls.predictions.decoder.bias"]
lowerCamelCase : Dict = torch.arange(SCREAMING_SNAKE_CASE_ ).expand((1, -1) ) + 2
return orig_state_dict
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["model_state_dict"]
lowerCamelCase : List[str] = YosoConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Any = YosoForMaskedLM(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[Any] = convert_checkpoint_helper(config.max_position_embeddings , SCREAMING_SNAKE_CASE_ )
print(model.load_state_dict(SCREAMING_SNAKE_CASE_ ) )
model.eval()
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_snake_case = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 283 | 1 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["image_processor", "tokenizer"]
__A : List[str] = "BlipImageProcessor"
__A : str = "AutoTokenizer"
def __init__( self , __A , __A ):
"""simple docstring"""
lowerCamelCase : Optional[int] = False
super().__init__(__A , __A )
lowerCamelCase : int = self.image_processor
def __call__( self , __A = None , __A = None , __A = True , __A = False , __A = None , __A = None , __A = 0 , __A = None , __A = None , __A = False , __A = False , __A = False , __A = False , __A = False , __A = True , __A = None , **__A , ):
"""simple docstring"""
if images is None and text is None:
raise ValueError("You have to specify either images or text." )
# Get only text
if images is None:
lowerCamelCase : Optional[Any] = self.tokenizer
lowerCamelCase : Tuple = self.tokenizer(
text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
return text_encoding
# add pixel_values
lowerCamelCase : List[str] = self.image_processor(__A , return_tensors=__A )
if text is not None:
lowerCamelCase : Optional[Any] = self.tokenizer(
text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_token_type_ids=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
else:
lowerCamelCase : List[Any] = None
if text_encoding is not None:
encoding_image_processor.update(__A )
return encoding_image_processor
def _snake_case ( self , *__A , **__A ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self , *__A , **__A ):
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.tokenizer.model_input_names
lowerCamelCase : List[str] = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 283 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)

class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(pooled), embs
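# Usage sketch (an addition; `model` is assumed to be a MultilingualCLIP
# instance and the hub id is the published M-CLIP checkpoint, used here only
# as an illustration, nothing in this module downloads it):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("M-CLIP/XLM-Roberta-Large-Vit-B-32")
#     batch = tokenizer(["a photo of a cat"], return_tensors="pt", padding=True)
#     projected, raw_embeddings = model(batch["input_ids"], batch["attention_mask"])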
| 283 | 1 |
import argparse
import torch
from transformers import GPTaConfig, GPTaModel, load_tf_weights_in_gpta
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if gpta_config_file == "":
lowerCamelCase : int = GPTaConfig()
else:
lowerCamelCase : Optional[int] = GPTaConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : str = GPTaModel(SCREAMING_SNAKE_CASE_ )
# Load weights from numpy
load_tf_weights_in_gpta(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
lowerCamelCase : Optional[Any] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
lowerCamelCase : Optional[Any] = pytorch_dump_folder_path + "/" + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(model.state_dict() , SCREAMING_SNAKE_CASE_ )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
_snake_case = parser.parse_args()
convert_gpta_checkpoint_to_pytorch(args.gpta_checkpoint_path, args.gpta_config_file, args.pytorch_dump_folder_path)
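# Example invocation (an addition; the script name and all paths are
# placeholders, only the flags come from the argparse definition above):
#
#     python convert_gpt2_checkpoint.py \
#         --gpt2_checkpoint_path /path/to/tf_checkpoint \
#         --gpt2_config_file /path/to/config.json \
#         --pytorch_dump_folder_path /path/to/output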
| 283 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
lowerCamelCase : Any = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCamelCase : List[Any] = dict(zip(__A , range(len(__A ) ) ) )
lowerCamelCase : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCamelCase : Optional[Any] = {"unk_token": "<unk>"}
lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
lowerCamelCase : str = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
lowerCamelCase : str = os.path.join(self.tmpdirname , __A )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__A , __A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__A )
def _snake_case ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase : Tuple = [Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_tokenizer()
lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase : Tuple = self.get_image_processor()
lowerCamelCase : List[Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__A )
lowerCamelCase : Optional[int] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __A )
self.assertIsInstance(processor_fast.tokenizer , __A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __A )
self.assertIsInstance(processor_fast.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCamelCase : List[str] = self.get_image_processor(do_normalize=__A )
lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Tuple = self.prepare_image_inputs()
lowerCamelCase : int = image_processor(__A , return_tensors="np" )
lowerCamelCase : Union[str, Any] = processor(images=__A , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.get_image_processor()
lowerCamelCase : Dict = self.get_tokenizer()
lowerCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Tuple = "lower newer"
lowerCamelCase : Union[str, Any] = processor(text=__A , return_tensors="np" )
lowerCamelCase : List[Any] = tokenizer(__A , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : Any = self.get_tokenizer()
lowerCamelCase : int = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[Any] = "lower newer"
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Any = processor(text=__A , images=__A )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = "google/owlvit-base-patch32"
lowerCamelCase : List[Any] = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : Tuple = ["cat", "nasa badge"]
lowerCamelCase : str = processor(text=__A )
lowerCamelCase : Union[str, Any] = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = "google/owlvit-base-patch32"
lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : Dict = [["cat", "nasa badge"], ["person"]]
lowerCamelCase : int = processor(text=__A )
lowerCamelCase : Tuple = 16
lowerCamelCase : Any = len(__A )
lowerCamelCase : Optional[Any] = max([len(__A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = "google/owlvit-base-patch32"
lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : List[Any] = ["cat", "nasa badge"]
lowerCamelCase : Optional[Any] = processor(text=__A )
lowerCamelCase : int = 16
lowerCamelCase : List[str] = inputs["input_ids"]
lowerCamelCase : int = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : List[str] = self.get_tokenizer()
lowerCamelCase : str = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
lowerCamelCase : Any = processor(images=__A , query_images=__A )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase : List[Any] = processor.batch_decode(__A )
lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
| 283 | 1 |
from __future__ import annotations
import math
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if num <= 0:
lowerCamelCase : List[str] = f"""{num}: Invalid input, please enter a positive integer."""
raise ValueError(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[str] = [True] * (num + 1)
lowerCamelCase : str = []
lowerCamelCase : Optional[int] = 2
lowerCamelCase : Any = int(math.sqrt(SCREAMING_SNAKE_CASE_ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(SCREAMING_SNAKE_CASE_ )
# Set multiples of start be False
for i in range(start * start , num + 1 , SCREAMING_SNAKE_CASE_ ):
if sieve[i] is True:
lowerCamelCase : str = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(SCREAMING_SNAKE_CASE_ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('''Enter a positive integer: ''').strip())))
| 283 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : List[Any] = BioGptTokenizer
__A : Optional[int] = False
def _snake_case ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
lowerCamelCase : str = dict(zip(__A , range(len(__A ) ) ) )
lowerCamelCase : Dict = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
lowerCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(__A ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(__A ) )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Dict = "lower newer"
lowerCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = BioGptTokenizer(self.vocab_file , self.merges_file )
lowerCamelCase : Optional[int] = "lower"
lowerCamelCase : Any = ["low", "er</w>"]
lowerCamelCase : List[str] = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
lowerCamelCase : Union[str, Any] = tokens + ["<unk>"]
lowerCamelCase : List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
lowerCamelCase : Optional[int] = tokenizer.encode("sequence builders" , add_special_tokens=__A )
lowerCamelCase : Tuple = tokenizer.encode("multi-sequence build" , add_special_tokens=__A )
lowerCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__A )
lowerCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 283 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = '''▁'''
_snake_case = {'''vocab_file''': '''sentencepiece.bpe.model'''}
_snake_case = {
'''vocab_file''': {
'''facebook/nllb-200-distilled-600M''': (
'''https://huggingface.co/facebook/nllb-200-distilled-600M/blob/main/sentencepiece.bpe.model'''
),
}
}
_snake_case = {
    '''facebook/nllb-200-distilled-600M''': 1024,
}
# fmt: off
_snake_case = ['''ace_Arab''', '''ace_Latn''', '''acm_Arab''', '''acq_Arab''', '''aeb_Arab''', '''afr_Latn''', '''ajp_Arab''', '''aka_Latn''', '''amh_Ethi''', '''apc_Arab''', '''arb_Arab''', '''ars_Arab''', '''ary_Arab''', '''arz_Arab''', '''asm_Beng''', '''ast_Latn''', '''awa_Deva''', '''ayr_Latn''', '''azb_Arab''', '''azj_Latn''', '''bak_Cyrl''', '''bam_Latn''', '''ban_Latn''', '''bel_Cyrl''', '''bem_Latn''', '''ben_Beng''', '''bho_Deva''', '''bjn_Arab''', '''bjn_Latn''', '''bod_Tibt''', '''bos_Latn''', '''bug_Latn''', '''bul_Cyrl''', '''cat_Latn''', '''ceb_Latn''', '''ces_Latn''', '''cjk_Latn''', '''ckb_Arab''', '''crh_Latn''', '''cym_Latn''', '''dan_Latn''', '''deu_Latn''', '''dik_Latn''', '''dyu_Latn''', '''dzo_Tibt''', '''ell_Grek''', '''eng_Latn''', '''epo_Latn''', '''est_Latn''', '''eus_Latn''', '''ewe_Latn''', '''fao_Latn''', '''pes_Arab''', '''fij_Latn''', '''fin_Latn''', '''fon_Latn''', '''fra_Latn''', '''fur_Latn''', '''fuv_Latn''', '''gla_Latn''', '''gle_Latn''', '''glg_Latn''', '''grn_Latn''', '''guj_Gujr''', '''hat_Latn''', '''hau_Latn''', '''heb_Hebr''', '''hin_Deva''', '''hne_Deva''', '''hrv_Latn''', '''hun_Latn''', '''hye_Armn''', '''ibo_Latn''', '''ilo_Latn''', '''ind_Latn''', '''isl_Latn''', '''ita_Latn''', '''jav_Latn''', '''jpn_Jpan''', '''kab_Latn''', '''kac_Latn''', '''kam_Latn''', '''kan_Knda''', '''kas_Arab''', '''kas_Deva''', '''kat_Geor''', '''knc_Arab''', '''knc_Latn''', '''kaz_Cyrl''', '''kbp_Latn''', '''kea_Latn''', '''khm_Khmr''', '''kik_Latn''', '''kin_Latn''', '''kir_Cyrl''', '''kmb_Latn''', '''kon_Latn''', '''kor_Hang''', '''kmr_Latn''', '''lao_Laoo''', '''lvs_Latn''', '''lij_Latn''', '''lim_Latn''', '''lin_Latn''', '''lit_Latn''', '''lmo_Latn''', '''ltg_Latn''', '''ltz_Latn''', '''lua_Latn''', '''lug_Latn''', '''luo_Latn''', '''lus_Latn''', '''mag_Deva''', '''mai_Deva''', '''mal_Mlym''', '''mar_Deva''', '''min_Latn''', '''mkd_Cyrl''', '''plt_Latn''', '''mlt_Latn''', '''mni_Beng''', '''khk_Cyrl''', '''mos_Latn''', '''mri_Latn''', '''zsm_Latn''', '''mya_Mymr''', '''nld_Latn''', '''nno_Latn''', '''nob_Latn''', '''npi_Deva''', '''nso_Latn''', '''nus_Latn''', '''nya_Latn''', '''oci_Latn''', '''gaz_Latn''', '''ory_Orya''', '''pag_Latn''', '''pan_Guru''', '''pap_Latn''', '''pol_Latn''', '''por_Latn''', '''prs_Arab''', '''pbt_Arab''', '''quy_Latn''', '''ron_Latn''', '''run_Latn''', '''rus_Cyrl''', '''sag_Latn''', '''san_Deva''', '''sat_Beng''', '''scn_Latn''', '''shn_Mymr''', '''sin_Sinh''', '''slk_Latn''', '''slv_Latn''', '''smo_Latn''', '''sna_Latn''', '''snd_Arab''', '''som_Latn''', '''sot_Latn''', '''spa_Latn''', '''als_Latn''', '''srd_Latn''', '''srp_Cyrl''', '''ssw_Latn''', '''sun_Latn''', '''swe_Latn''', '''swh_Latn''', '''szl_Latn''', '''tam_Taml''', '''tat_Cyrl''', '''tel_Telu''', '''tgk_Cyrl''', '''tgl_Latn''', '''tha_Thai''', '''tir_Ethi''', '''taq_Latn''', '''taq_Tfng''', '''tpi_Latn''', '''tsn_Latn''', '''tso_Latn''', '''tuk_Latn''', '''tum_Latn''', '''tur_Latn''', '''twi_Latn''', '''tzm_Tfng''', '''uig_Arab''', '''ukr_Cyrl''', '''umb_Latn''', '''urd_Arab''', '''uzn_Latn''', '''vec_Latn''', '''vie_Latn''', '''war_Latn''', '''wol_Latn''', '''xho_Latn''', '''ydd_Hebr''', '''yor_Latn''', '''yue_Hant''', '''zho_Hans''', '''zho_Hant''', '''zul_Latn''']
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : List[str] = VOCAB_FILES_NAMES
__A : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Dict = PRETRAINED_VOCAB_FILES_MAP
__A : Union[str, Any] = ["input_ids", "attention_mask"]
__A : List[int] = []
__A : List[int] = []
def __init__( self , __A , __A="<s>" , __A="</s>" , __A="</s>" , __A="<s>" , __A="<unk>" , __A="<pad>" , __A="<mask>" , __A=None , __A=None , __A=None , __A = None , __A=None , __A=False , **__A , ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
lowerCamelCase : Union[str, Any] = {} if sp_model_kwargs is None else sp_model_kwargs
lowerCamelCase : Optional[Any] = legacy_behaviour
super().__init__(
bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , tokenizer_file=__A , src_lang=__A , tgt_lang=__A , additional_special_tokens=__A , sp_model_kwargs=self.sp_model_kwargs , legacy_behaviour=__A , **__A , )
lowerCamelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__A ) )
lowerCamelCase : str = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | ---- | ---- | ---- | ---- | ---- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a'
# spm | '<unk>' | '<s>' | '</s>' | 'an' | '▁n' | '▁m' | '▁t' | '▁k' | '▁a' | '▁s'
# Mimic fairseq token-to-id alignment for the first 4 token
lowerCamelCase : int = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
lowerCamelCase : List[str] = 1
lowerCamelCase : Optional[Any] = len(self.sp_model )
lowerCamelCase : List[Any] = {
code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(__A )
}
lowerCamelCase : int = {v: k for k, v in self.lang_code_to_id.items()}
lowerCamelCase : str = len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset
self.fairseq_tokens_to_ids.update(self.lang_code_to_id )
lowerCamelCase : List[Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
lowerCamelCase : Any = list(self.lang_code_to_id.keys() )
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
self._additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in self._additional_special_tokens] )
lowerCamelCase : Union[str, Any] = src_lang if src_lang is not None else "eng_Latn"
lowerCamelCase : Union[str, Any] = self.lang_code_to_id[self._src_lang]
lowerCamelCase : Optional[int] = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
def __getstate__( self ):
"""simple docstring"""
lowerCamelCase : int = self.__dict__.copy()
lowerCamelCase : Any = None
lowerCamelCase : Tuple = self.sp_model.serialized_model_proto()
return state
def __setstate__( self , __A ):
"""simple docstring"""
lowerCamelCase : List[str] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
lowerCamelCase : Optional[Any] = {}
lowerCamelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
@property
def _snake_case ( self ):
"""simple docstring"""
return len(self.sp_model ) + len(self.lang_code_to_id ) + self.fairseq_offset + 1 # Plus 1 for the mask token
@property
def _snake_case ( self ):
"""simple docstring"""
return self._src_lang
@src_lang.setter
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _snake_case ( self , __A , __A = None , __A = False ):
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
lowerCamelCase : Optional[Any] = [1] * len(self.prefix_tokens )
lowerCamelCase : List[Any] = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(__A )) + suffix_ones
return prefix_ones + ([0] * len(__A )) + ([0] * len(__A )) + suffix_ones
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
lowerCamelCase : str = [self.sep_token_id]
lowerCamelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , __A , __A , __A , __A , **__A ):
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model" )
lowerCamelCase : Any = src_lang
lowerCamelCase : Optional[int] = self(__A , add_special_tokens=__A , return_tensors=__A , **__A )
lowerCamelCase : int = self.convert_tokens_to_ids(__A )
lowerCamelCase : List[str] = tgt_lang_id
return inputs
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = {self.convert_ids_to_tokens(__A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self , __A ):
"""simple docstring"""
return self.sp_model.encode(__A , out_type=__A )
def _snake_case ( self , __A ):
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
lowerCamelCase : str = self.sp_model.PieceToId(__A )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self , __A ):
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Tuple = "".join(__A ).replace(__A , " " ).strip()
return out_string
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
if not os.path.isdir(__A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase : Union[str, Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , "wb" ) as fi:
lowerCamelCase : Optional[int] = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
def _snake_case ( self , __A , __A = "eng_Latn" , __A = None , __A = "fra_Latn" , **__A , ):
"""simple docstring"""
lowerCamelCase : str = src_lang
lowerCamelCase : Tuple = tgt_lang
return super().prepare_seqaseq_batch(__A , __A , **__A )
def _snake_case ( self ):
"""simple docstring"""
return self.set_src_lang_special_tokens(self.src_lang )
def _snake_case ( self ):
"""simple docstring"""
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Optional[int] = self.lang_code_to_id[src_lang]
if self.legacy_behaviour:
lowerCamelCase : Optional[int] = []
lowerCamelCase : Dict = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase : int = [self.cur_lang_code]
lowerCamelCase : Tuple = [self.eos_token_id]
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.lang_code_to_id[lang]
if self.legacy_behaviour:
lowerCamelCase : Union[str, Any] = []
lowerCamelCase : Tuple = [self.eos_token_id, self.cur_lang_code]
else:
lowerCamelCase : List[str] = [self.cur_lang_code]
lowerCamelCase : Optional[Any] = [self.eos_token_id]
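# Usage sketch (an addition; upstream this tokenizer is published as
# NllbTokenizer, and `text_target` is the standard seq2seq entry point for
# tokenizing labels with the target language code applied):
#
#     tokenizer = NllbTokenizer.from_pretrained(
#         "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
#     )
#     inputs = tokenizer("Hello world", return_tensors="pt")
#     labels = tokenizer(text_target="Bonjour le monde", return_tensors="pt").input_ids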
| 283 |
def least_divisible_repunit(divisor: int) -> int:
    """Return the least k such that the repunit R(k) = 111...1 (k ones) is divisible by divisor."""
    if divisor % 5 == 0 or divisor % 2 == 0:
        return 0
    repunit = 1
    repunit_index = 1
    while repunit:
        repunit = (10 * repunit + 1) % divisor
        repunit_index += 1
    return repunit_index

def solution(limit: int = 1_000_000) -> int:
    """Project Euler 129: return the least divisor whose repunit length first exceeds the limit."""
    divisor = limit - 1
    if divisor % 2 == 0:
        divisor += 1
    while least_divisible_repunit(divisor) <= limit:
        divisor += 2
    return divisor

if __name__ == "__main__":
    print(f"{solution() = }")
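# Sanity checks (an addition): R(6) = 111111 = 3 * 7 * 11 * 13 * 37, so the
# least repunit divisible by 7 has length 6, and R(5) = 11111 = 41 * 271.
assert least_divisible_repunit(7) == 6
assert least_divisible_repunit(41) == 5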
| 283 | 1 |
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class QuestionAnsweringExtractive(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="question-answering-extractive", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"question": Value("string"), "context": Value("string")})
    label_schema: ClassVar[Features] = Features(
        {
            "answers": Sequence(
                {
                    "text": Value("string"),
                    "answer_start": Value("int32"),
                }
            )
        }
    )
    question_column: str = "question"
    context_column: str = "context"
    answers_column: str = "answers"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 283 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
lowerCamelCase : Tuple = "backbone." if is_semantic else ""
lowerCamelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", "beit.embeddings.cls_token"),
(f"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
(f"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
(f"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
lowerCamelCase : Optional[Any] = "backbone." if is_semantic else ""
# queries, keys and values
lowerCamelCase : Optional[Any] = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
lowerCamelCase : Optional[Any] = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
lowerCamelCase : Tuple = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
lowerCamelCase : str = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Any = q_bias
lowerCamelCase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : int = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowerCamelCase : Any = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
lowerCamelCase : Any = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
lowerCamelCase : int = gamma_a
lowerCamelCase : Optional[Any] = gamma_a
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = dct.pop(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[Any] = val
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : Optional[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False):
    """
    Copy/paste/tweak the original DiT checkpoint's weights into our BEiT structure.
    """
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True, use_mask_token=has_lm_head)

    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16

    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    rename_keys = create_rename_keys(config, has_lm_head=has_lm_head)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, has_lm_head=has_lm_head)

    # load HuggingFace model
    model = BeitForMaskedImageModeling(config) if has_lm_head else BeitForImageClassification(config)
    model.eval()
    model.load_state_dict(state_dict)

    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size, resample=PILImageResampling.BILINEAR, do_center_crop=False
    )
    image = prepare_img()

    encoding = image_processor(images=image, return_tensors="pt")
    pixel_values = encoding["pixel_values"]

    outputs = model(pixel_values)
    logits = outputs.logits

    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape), "Shape of logits not as expected"

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
_snake_case = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 283 | 1 |
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """Project Euler 63: count the positive n-digit integers that are also nth powers."""
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )
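# Hedged check (added): 8**1 has 1 digit and 9**2 = 81 has 2 digits, so both are
# counted, while 10**n always has n + 1 digits, which is why bases stop below 10.
def _demo_power_digit_counts():
    assert len(str(8**1)) == 1
    assert len(str(9**2)) == 2
    assert all(len(str(10**n)) == n + 1 for n in range(1, 5))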
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
| 283 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class BridgeTowerProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "BridgeTowerImageProcessor"
    tokenizer_class = ("RobertaTokenizer", "RobertaTokenizerFast")

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)

    def __call__(
        self, images, text=None, add_special_tokens=True, padding=False, truncation=None, max_length=None,
        stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None,
        return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False,
        return_length=False, verbose=True, return_tensors=None, **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask from the image processor
        encoding_image_processor = self.image_processor(
            images, return_tensors=return_tensors, do_normalize=True, do_center_crop=True, **kwargs
        )
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
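# Hedged usage sketch (added; helper name and checkpoint are illustrative): the
# processor fans a (text, image) pair out to the tokenizer and the image
# processor, then merges both encodings into one batch.
def _demo_bridgetower_processor():
    from PIL import Image

    processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
    image = Image.new("RGB", (224, 224))
    inputs = processor(images=image, text="a blank image", return_tensors="pt")
    assert {"input_ids", "attention_mask", "pixel_values"} <= set(inputs.keys())
    return inputs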
| 283 | 1 |
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res


def left_shift(data):
    return data[1:] + data[0]


def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res


def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]
def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
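# Hedged sanity checks (added): apply_table permutes 1-indexed bit positions,
# xor compares bit strings position-wise, and left_shift rotates by one bit.
def _demo_sdes_helpers():
    assert apply_table("10100101", [2, 6, 3, 1, 4, 8, 5, 7]) == "01110100"
    assert xor("1010", "0110") == "1100"
    assert left_shift("10100") == "01001"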
if __name__ == "__main__":
_snake_case = input('''Enter 10 bit key: ''')
_snake_case = input('''Enter 8 bit message: ''')
_snake_case = [6, 3, 7, 4, 8, 5, 10, 9]
_snake_case = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
_snake_case = [2, 4, 3, 1]
_snake_case = [2, 6, 3, 1, 4, 8, 5, 7]
_snake_case = [4, 1, 3, 5, 7, 2, 8, 6]
_snake_case = [4, 1, 2, 3, 2, 3, 4, 1]
_snake_case = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
_snake_case = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
_snake_case = apply_table(key, paa_table)
_snake_case = temp[:5]
_snake_case = temp[5:]
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = left_shift(left)
_snake_case = left_shift(right)
_snake_case = apply_table(left + right, pa_table)
# encryption
_snake_case = apply_table(message, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
_snake_case = apply_table(CT, IP)
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = temp[4:] + temp[:4]
_snake_case = function(expansion, sa, sa, keya, temp)
_snake_case = apply_table(temp, IP_inv)
print('''Plain text after decypting is:''', PT)
| 283 |
def cocktail_shaker_sort(unsorted):
    """Sort a list in place with alternating backward and forward bubble passes."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False

        # backward pass: sink the smallest remaining element toward the front
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True

        # forward pass: float the largest remaining element toward the back
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True

        if not swapped:
            break
    return unsorted
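# Hedged examples (added): the sort mutates the list in place and returns it.
def _demo_cocktail_shaker_sort():
    assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
    assert cocktail_shaker_sort([-4, 5, 0, 1, 2, 11]) == [-4, 0, 1, 2, 5, 11]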
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = input('''Enter numbers separated by a comma:\n''').strip()
_snake_case = [int(item) for item in user_input.split(''',''')]
print(f'''{cocktail_shaker_sort(unsorted) = }''')
| 283 | 1 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>",
        unk_token="<unk>", pad_token="<pad>", mask_token="<mask>",
        additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], sp_model_kwargs=None, **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token,
            cls_token=cls_token, pad_token=pad_token, mask_token=mask_token,
            additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs,
        )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
        # sentencepiece vocabulary (this is the case for <s> and </s>)
        self.fairseq_tokens_to_ids = {"<s>NOTUSED": 0, "<pad>": 1, "</s>NOTUSED": 2, "<unk>": 3}
        self.fairseq_offset = len(self.fairseq_tokens_to_ids)
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.fairseq_tokens_to_ids) + len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        elif self.sp_model.PieceToId(token) == 0:
            # Convert sentence piece unk token to fairseq unk token index
            return self.unk_token_id
        return self.fairseq_offset + self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
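# Hedged usage sketch (added; requires a real sentencepiece model file): ids 0-3
# are the fairseq specials declared in __init__, and a regular sentencepiece id
# is shifted by fairseq_offset (4) when exposed through the tokenizer.
def _demo_fairseq_offset(tokenizer: CamembertTokenizer, piece: str = "▁le"):
    assert tokenizer._convert_token_to_id("<pad>") == 1
    sp_id = tokenizer.sp_model.PieceToId(piece)
    if sp_id != 0:  # id 0 is sentencepiece's own unk and maps to unk_token_id instead
        assert tokenizer._convert_token_to_id(piece) == sp_id + tokenizer.fairseq_offset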
| 283 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    PriorTransformer,
    StableUnCLIPPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
    PipelineKarrasSchedulerTesterMixin,
    PipelineLatentTesterMixin,
    PipelineTesterMixin,
    assert_mean_pixel_difference,
)
enable_full_determinism()
class StableUnCLIPPipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableUnCLIPPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS

    # TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
    test_xformers_attention = False
    def get_dummy_components(self):
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size

        # prior components
        torch.manual_seed(0)
        prior_tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        prior_text_encoder = CLIPTextModelWithProjection(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size,
                projection_dim=embedder_projection_dim, intermediate_size=37, layer_norm_eps=1e-05,
                num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        prior = PriorTransformer(
            num_attention_heads=2, attention_head_dim=12, embedding_dim=embedder_projection_dim, num_layers=1,
        )

        torch.manual_seed(0)
        prior_scheduler = DDPMScheduler(
            variance_type="fixed_small_log", prediction_type="sample", num_train_timesteps=1000,
            clip_sample=True, clip_sample_range=5.0, beta_schedule="squaredcos_cap_v2",
        )

        # regular denoising components
        torch.manual_seed(0)
        image_normalizer = StableUnCLIPImageNormalizer(embedding_dim=embedder_hidden_size)
        image_noising_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2")

        torch.manual_seed(0)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        torch.manual_seed(0)
        text_encoder = CLIPTextModel(
            CLIPTextConfig(
                bos_token_id=0, eos_token_id=2, hidden_size=embedder_hidden_size, projection_dim=32,
                intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5,
                pad_token_id=1, vocab_size=1000,
            )
        )

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
            up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
            block_out_channels=(32, 64), attention_head_dim=(2, 4), class_embed_type="projection",
            projection_class_embeddings_input_dim=embedder_projection_dim * 2,
            cross_attention_dim=embedder_hidden_size, layers_per_block=1,
            upcast_attention=True, use_linear_projection=True,
        )

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_schedule="scaled_linear", beta_start=0.00085, beta_end=0.012,
            prediction_type="v_prediction", set_alpha_to_one=False, steps_offset=1,
        )

        torch.manual_seed(0)
        vae = AutoencoderKL()

        components = {
            # prior components
            "prior_tokenizer": prior_tokenizer,
            "prior_text_encoder": prior_text_encoder,
            "prior": prior,
            "prior_scheduler": prior_scheduler,
            # image noising components
            "image_normalizer": image_normalizer,
            "image_noising_scheduler": image_noising_scheduler,
            # regular denoising components
            "tokenizer": tokenizer,
            "text_encoder": text_encoder,
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
        }

        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "prior_num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == "cpu"
        self._test_attention_slicing_forward_pass(test_max_difference=test_max_difference)

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device in ["cpu", "mps"]
        self._test_inference_batch_single_identical(test_max_difference=test_max_difference)
@slow
@require_torch_gpu
class StableUnCLIPPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_unclip(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy"
        )

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        # stable unclip will oom when integration tests are run on a V100,
        # so turn on memory savings
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        generator = torch.Generator(device="cpu").manual_seed(0)
        output = pipe("anime turtle", generator=generator, output_type="np")

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)

    def test_stable_unclip_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        pipe.enable_sequential_cpu_offload()

        _ = pipe(
            "anime turtle",
            prior_num_inference_steps=2,
            num_inference_steps=2,
            output_type="np",
        )

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 7 GB is allocated
        assert mem_bytes < 7 * 10**9
| 283 | 1 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
def handle_test_results(test_results):
    expressions = test_results.split(" ")

    failed = 0
    success = 0

    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]

    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])

    return failed, success, time_spent
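# Hedged example (added): the parser walks a pytest summary tail such as
# "1 failed, 23 passed in 84.33s =", where each count precedes its keyword.
def _demo_handle_test_results():
    failed, success, time_spent = handle_test_results("1 failed, 23 passed in 84.33s =")
    assert (failed, success, time_spent) == (1, 23, "84.33s")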
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False

    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures

        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results

        self.thread_ts = None

    @property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0

        for time in time_spent:
            time_parts = time.split(":")

            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]

            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds

        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
    @property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}

    @property
    def no_failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": f"🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.",
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }

    @property
    def failures(self) -> Dict:
        return {
            "type": "section",
            "text": {
                "type": "plain_text",
                "text": (
                    f"There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"
                    f" {self.time}."
                ),
                "emoji": True,
            },
            "accessory": {
                "type": "button",
                "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
            },
        }
    @property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue

            if report != "":
                report += "\n\n"

            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"

        return {
            "type": "section",
            "text": {
                "type": "mrkdwn",
                "text": f"The following examples had failures:\n\n\n{report}\n",
            },
        }

    @property
    def payload(self) -> str:
        blocks = [self.header]

        if self.n_failures > 0:
            blocks.append(self.failures)

        if self.n_failures > 0:
            blocks.extend([self.category_failures])

        if self.n_failures == 0:
            blocks.append(self.no_failures)

        return json.dumps(blocks)
    @staticmethod
    def error_out():
        payload = [
            {
                "type": "section",
                "text": {
                    "type": "plain_text",
                    "text": "There was an issue running the tests.",
                },
                "accessory": {
                    "type": "button",
                    "text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
                    "url": f"https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}",
                },
            }
        ]

        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))

        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            text="There was an issue running the tests.",
            blocks=payload,
        )

    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))

        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."

        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
            blocks=self.payload,
            text=text,
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"

        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}

        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }

        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]

    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")

        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                blocks = self.get_reply_blocks(job, job_link, job_result["failures"], text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))

                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )

                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}

    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)

    return {}


def retrieve_artifact(name: str):
    _artifact = {}

    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e

    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts: Dict[str, Artifact] = {}

    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)

        _available_artifacts[artifact_name].add_path(directory)

    return _available_artifacts
if __name__ == "__main__":
_snake_case = get_job_links()
_snake_case = retrieve_available_artifacts()
_snake_case = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
_snake_case = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
_snake_case = github_actions_job_links.get('''run_doctests''')
_snake_case = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
_snake_case = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
_snake_case , _snake_case , _snake_case = handle_test_results(artifact['''stats'''])
_snake_case = failed
_snake_case = success
_snake_case = time_spent[1:-1] + ''', '''
_snake_case = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
_snake_case = line.replace('''FAILED ''', '''''')
_snake_case = line.split()[0].replace('''\n''', '''''')
if "::" in line:
_snake_case , _snake_case = line.split('''::''')
else:
_snake_case , _snake_case = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
_snake_case = docs[file_regex]
doc_test_results[category]["failed"].append(test)
_snake_case = all_failures[test] if test in all_failures else '''N/A'''
_snake_case = failure
break
_snake_case = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 283 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_squeezebert": [
        "SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SqueezeBertConfig",
        "SqueezeBertOnnxConfig",
    ],
    "tokenization_squeezebert": ["SqueezeBertTokenizer"],
}
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_squeezebert"] = [
        "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SqueezeBertForMaskedLM",
        "SqueezeBertForMultipleChoice",
        "SqueezeBertForQuestionAnswering",
        "SqueezeBertForSequenceClassification",
        "SqueezeBertForTokenClassification",
        "SqueezeBertModel",
        "SqueezeBertModule",
        "SqueezeBertPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_squeezebert import (
        SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SqueezeBertConfig,
        SqueezeBertOnnxConfig,
    )
    from .tokenization_squeezebert import SqueezeBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_squeezebert import (
            SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            SqueezeBertForMaskedLM,
            SqueezeBertForMultipleChoice,
            SqueezeBertForQuestionAnswering,
            SqueezeBertForSequenceClassification,
            SqueezeBertForTokenClassification,
            SqueezeBertModel,
            SqueezeBertModule,
            SqueezeBertPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 283 | 1 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    default_checkpoint = "openai/whisper-base"
    description = (
        "This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
        "transcribed text."
    )
    name = "transcriber"
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration

    inputs = ["audio"]
    outputs = ["text"]

    def encode(self, audio):
        return self.pre_processor(audio, return_tensors="pt").input_features

    def forward(self, inputs):
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
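# Hedged usage sketch (added; the waveform is illustrative): PipelineTool
# instances are callable and run encode -> forward -> decode in sequence,
# so raw audio goes in and a transcribed string comes out.
def _demo_transcriber():
    import numpy as np

    tool = SpeechToTextTool()
    waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence at 16 kHz
    return tool(waveform)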
| 283 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DECISION_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "edbeeching/decision-transformer-gym-hopper-medium": (
        "https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"
    ),
    # See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class DecisionTransformerConfig(PretrainedConfig):
    model_type = "decision_transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "max_position_embeddings": "n_positions",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self, state_dim=17, act_dim=4, hidden_size=128, max_ep_len=4096, action_tanh=True, vocab_size=1,
        n_positions=1024, n_layer=3, n_head=1, n_inner=None, activation_function="relu", resid_pdrop=0.1,
        embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02,
        scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256,
        scale_attn_by_inverse_layer_idx=False, reorder_and_upcast_attn=False, **kwargs,
    ):
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
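# Hedged usage sketch (added): state_dim/act_dim describe the environment's
# observation and action sizes (11/3 here are illustrative, Hopper-like values);
# the remaining fields are GPT-2-style wiring.
def _demo_decision_transformer_config():
    config = DecisionTransformerConfig(state_dim=11, act_dim=3, hidden_size=64)
    assert config.max_position_embeddings == config.n_positions  # mapped via attribute_map
    return config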
| 283 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import SegformerImageProcessor, SwinConfig, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name):
    auxiliary_in_channels = 384
    window_size = 7
    if "tiny" in model_name:
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif "small" in model_name:
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif "base" in model_name:
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
        window_size = 12
        auxiliary_in_channels = 512
    elif "large" in model_name:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)
        window_size = 12
        auxiliary_in_channels = 768

    # set label information
    num_labels = 150
    repo_id = "huggingface/label-files"
    filename = "ade20k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}

    backbone_config = SwinConfig(
        embed_dim=embed_dim, depths=depths, num_heads=num_heads, window_size=window_size,
        out_features=["stage1", "stage2", "stage3", "stage4"],
    )
    config = UperNetConfig(
        backbone_config=backbone_config, auxiliary_in_channels=auxiliary_in_channels,
        num_labels=num_labels, id2label=id2label, label2id=label2id,
    )

    return config
def create_rename_keys(config):
    rename_keys = []

    # fmt: off
    # stem
    rename_keys.append(("backbone.patch_embed.projection.weight", "backbone.embeddings.patch_embeddings.projection.weight"))
    rename_keys.append(("backbone.patch_embed.projection.bias", "backbone.embeddings.patch_embeddings.projection.bias"))
    rename_keys.append(("backbone.patch_embed.norm.weight", "backbone.embeddings.norm.weight"))
    rename_keys.append(("backbone.patch_embed.norm.bias", "backbone.embeddings.norm.bias"))
    # stages
    for i in range(len(config.backbone_config.depths)):
        for j in range(config.backbone_config.depths[i]):
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_before.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_bias_table", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.relative_position_index", f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.weight", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.attn.w_msa.proj.bias", f"backbone.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.weight", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.norm2.bias", f"backbone.encoder.layers.{i}.blocks.{j}.layernorm_after.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.weight", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.0.0.bias", f"backbone.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.weight", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.weight"))
            rename_keys.append((f"backbone.stages.{i}.blocks.{j}.ffn.layers.1.bias", f"backbone.encoder.layers.{i}.blocks.{j}.output.dense.bias"))
        if i < 3:
            rename_keys.append((f"backbone.stages.{i}.downsample.reduction.weight", f"backbone.encoder.layers.{i}.downsample.reduction.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.weight", f"backbone.encoder.layers.{i}.downsample.norm.weight"))
            rename_keys.append((f"backbone.stages.{i}.downsample.norm.bias", f"backbone.encoder.layers.{i}.downsample.norm.bias"))
        rename_keys.append((f"backbone.norm{i}.weight", f"backbone.hidden_states_norms.stage{i+1}.weight"))
        rename_keys.append((f"backbone.norm{i}.bias", f"backbone.hidden_states_norms.stage{i+1}.bias"))
    # decode head
    rename_keys.extend(
        [
            ("decode_head.conv_seg.weight", "decode_head.classifier.weight"),
            ("decode_head.conv_seg.bias", "decode_head.classifier.bias"),
            ("auxiliary_head.conv_seg.weight", "auxiliary_head.classifier.weight"),
            ("auxiliary_head.conv_seg.bias", "auxiliary_head.classifier.bias"),
        ]
    )
    # fmt: on

    return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.weight")
            in_proj_bias = state_dict.pop(f"backbone.stages.{i}.blocks.{j}.attn.w_msa.qkv.bias")
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.weight"] = in_proj_weight[:dim, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.query.bias"] = in_proj_bias[:dim]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.weight"] = in_proj_weight[dim : dim * 2, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.key.bias"] = in_proj_bias[dim : dim * 2]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.weight"] = in_proj_weight[-dim:, :]
            state_dict[f"backbone.encoder.layers.{i}.blocks.{j}.attention.self.value.bias"] = in_proj_bias[-dim:]
            # fmt: on
def correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, 4, in_channel // 4)
    x = x[:, [0, 2, 1, 3], :].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def reverse_correct_unfold_reduction_order(x):
    out_channel, in_channel = x.shape
    x = x.reshape(out_channel, in_channel // 4, 4)
    x = x[:, :, [0, 2, 1, 3]].transpose(1, 2).reshape(out_channel, in_channel)
    return x


def correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(4, in_channel // 4)
    x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
    return x


def reverse_correct_unfold_norm_order(x):
    in_channel = x.shape[0]
    x = x.reshape(in_channel // 4, 4)
    x = x[:, [0, 2, 1, 3]].transpose(0, 1).reshape(in_channel)
    return x
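# Hedged round-trip check (added): the [0, 2, 1, 3] interleave is an involution,
# so each reverse_* helper above exactly undoes its correct_* counterpart.
def _demo_unfold_roundtrip():
    x = torch.randn(8, 16)  # any (out_channel, in_channel) with in_channel % 4 == 0
    assert torch.equal(reverse_correct_unfold_reduction_order(correct_unfold_reduction_order(x)), x)
    v = torch.randn(16)
    assert torch.equal(reverse_correct_unfold_norm_order(correct_unfold_norm_order(v)), v)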
def convert_upernet_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    model_name_to_url = {
        "upernet-swin-tiny": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth",
        "upernet-swin-small": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth",
        "upernet-swin-base": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth",
        "upernet-swin-large": "https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth",
    }
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", file_name=model_name)[
        "state_dict"
    ]

    for name, param in state_dict.items():
        print(name, param.shape)

    config = get_upernet_config(model_name)
    model = UperNetForSemanticSegmentation(config)
    model.eval()

    # replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key)
        if "bn" in key:
            key = key.replace("bn", "batch_norm")
        state_dict[key] = val

    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config.backbone_config)

    # fix downsample parameters
    for key, value in state_dict.items():
        if "downsample" in key:
            if "reduction" in key:
                state_dict[key] = reverse_correct_unfold_reduction_order(value)
            if "norm" in key:
                state_dict[key] = reverse_correct_unfold_norm_order(value)

    model.load_state_dict(state_dict)

    # verify on image
    url = "https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")

    processor = SegformerImageProcessor()
    pixel_values = processor(image, return_tensors="pt").pixel_values

    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print(logits.shape)
    print("First values of logits:", logits[0, 0, :3, :3])
    # assert values
    if model_name == "upernet-swin-tiny":
        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        )
    elif model_name == "upernet-swin-small":
        expected_slice = torch.tensor(
            [[-7.1921, -7.1921, -6.9532], [-7.1921, -7.1921, -6.9532], [-7.0908, -7.0908, -6.8534]]
        )
    elif model_name == "upernet-swin-base":
        expected_slice = torch.tensor(
            [[-6.5851, -6.5851, -6.4330], [-6.5851, -6.5851, -6.4330], [-6.4763, -6.4763, -6.3254]]
        )
    elif model_name == "upernet-swin-large":
        expected_slice = torch.tensor(
            [[-7.5297, -7.5297, -7.3802], [-7.5297, -7.5297, -7.3802], [-7.4044, -7.4044, -7.2586]]
        )
    print("Logits:", outputs.logits[0, 0, :3, :3])
    assert torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor for {model_name} to hub")
        model.push_to_hub(f"openmmlab/{model_name}")
        processor.push_to_hub(f"openmmlab/{model_name}")
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''upernet-swin-tiny''',
type=str,
choices=[f'''upernet-swin-{size}''' for size in ['''tiny''', '''small''', '''base''', '''large''']],
help='''Name of the Swin + UperNet model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 283 |
def solution(n: int = 4000000) -> int:
    """Return the sum of the even-valued Fibonacci terms that do not exceed n."""
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
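# Hedged quick check (added): the even Fibonacci terms not exceeding 100 are
# 2, 8 and 34, so solution(100) should equal 44.
def _demo_even_fib():
    assert solution(100) == 2 + 8 + 34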
if __name__ == "__main__":
print(f'''{solution() = }''')
| 283 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_gpt_sw3"] = ["GPTSw3Tokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 283 |
from typing import TYPE_CHECKING
from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_tf_available,
    is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]

if TYPE_CHECKING:
    from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 283 | 1 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : List[Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : List[str] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : int = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : List[Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Tuple = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Any = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
def lowercase_( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def lowercase_( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def lowercase_( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def lowercase_( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def lowercase_( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def lowercase_( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
def lowercase_( *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
requires_backends(SCREAMING_SNAKE_CASE_ , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : int = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Tuple = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : str = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Any = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : List[Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : int = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : List[str] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : str = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : str = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : List[Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Any = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Tuple = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : List[Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Any = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : List[Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Tuple = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : str = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Tuple = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : List[Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : int = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["torch"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["torch"] )
| 283 |
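# The classes above all repeat one pattern: a torch-only placeholder that fails
# fast with a helpful ImportError when the backend is missing. A minimal,
# self-contained sketch of that pattern (the names DummyObject and
# requires_backends mirror the transformers/diffusers utilities; treat them as
# assumptions, not this file's exact API):
import importlib.util


def requires_backends(obj, backends):
    name = getattr(obj, "__name__", obj.__class__.__name__)
    missing = [b for b in backends if importlib.util.find_spec(b) is None]
    if missing:
        raise ImportError(f"{name} requires the following backends: {', '.join(missing)}")


class DummyObject(type):
    # Any attribute access on the placeholder class re-checks the backend.
    def __getattr__(cls, key):
        requires_backends(cls, cls._backends)


class SomeTorchOnlyModel(metaclass=DummyObject):
    _backends = ["torch"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["torch"])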
import argparse

import intel_extension_for_pytorch as ipex
import torch

from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline

parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowy and brightly lit night, with many bright buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex (a sample input lets IPEX trace the UNet's shapes)
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
| 283 | 1 |
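# The same IPEX recipe as the script above, reduced to a hedged toy module:
# channels-last layout plus ipex.optimize with a bfloat16 autocast region.
# TinyNet is a made-up example module, not part of the pipeline above.
import intel_extension_for_pytorch as ipex
import torch


class TinyNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, kernel_size=3, padding=1)

    def forward(self, x):
        return self.conv(x)


model = TinyNet().eval().to(memory_format=torch.channels_last)
model = ipex.optimize(model, dtype=torch.bfloat16, inplace=True)

with torch.no_grad(), torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    out = model(torch.randn(1, 3, 64, 64).to(memory_format=torch.channels_last))
print(out.shape)  # torch.Size([1, 8, 64, 64])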
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of VQModel.encode: the (pre-quantization) latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    """A VQ-VAE style autoencoder: conv encoder -> vector quantizer -> conv decoder."""

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> VQEncoderOutput:
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through the quantization layer unless explicitly skipped
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
| 283 |
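# Hedged round-trip through the VQModel defined above (assumed in scope, with
# its default single-block config): encode, then quantize and decode.
import torch

model = VQModel()
images = torch.randn(1, 3, 32, 32)

latents = model.encode(images).latents          # encoder + 1x1 quant conv
reconstruction = model.decode(latents).sample   # quantize -> post-quant conv -> decoder
print(latents.shape, reconstruction.shape)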
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)

        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 283 | 1 |
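# Hedged standalone version of the integration check above, outside the test
# harness (network access and the jplu/tf-flaubert-small-cased checkpoint assumed):
import tensorflow as tf
from transformers import TFFlaubertModel

model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
input_ids = tf.convert_to_tensor([[0, 158, 735, 2592, 1424, 6727, 82, 1]], dtype=tf.int32)  # "J'aime flaubert !"
hidden_states = model(input_ids)[0]
print(hidden_states.shape)  # (1, 8, 512) for this small checkpoint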
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''Visual-Attention-Network/van-base''': (
'''https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json'''
),
}
class VanConfig(PretrainedConfig):
    model_type = "van"

    def __init__(
        self,
        image_size=224,
        num_channels=3,
        patch_sizes=[7, 3, 3, 3],
        strides=[4, 2, 2, 2],
        hidden_sizes=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        mlp_ratios=[8, 8, 4, 4],
        hidden_act="gelu",
        initializer_range=0.02,
        layer_norm_eps=1e-6,
        layer_scale_init_value=1e-2,
        drop_path_rate=0.0,
        dropout_rate=0.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.mlp_ratios = mlp_ratios
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.layer_scale_init_value = layer_scale_init_value
        self.drop_path_rate = drop_path_rate
        self.dropout_rate = dropout_rate
| 283 |
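# Hedged usage of the VanConfig defined above (assumed in scope): the defaults
# reproduce the van-base hyperparameters, and any field can be overridden.
config = VanConfig(drop_path_rate=0.1)
print(config.model_type, config.hidden_sizes)  # van [64, 128, 320, 512]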
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    # Build a blend mask that is opaque in the tile core and ramps to 0 in the overlap.
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_point, max_point):
    return (
        clamp(rect[0], min_point[0], max_point[0]),
        clamp(rect[1], min_point[1], max_point[1]),
        clamp(rect[2], min_point[0], max_point[0]),
        clamp(rect[3], min_point[1], max_point[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def next_divisible(n, d):
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| 283 | 1 |
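# Hedged mini-demo of make_transparency_mask above (helper assumed in scope):
# the blend mask for an interior tile ramps from 0 at the border to 255 in the core.
mask = make_transparency_mask((128, 128), 32)
print(mask.shape)                # (128, 128): a 64x64 opaque core plus 32-px ramps
print(mask[0, 0], mask[64, 64])  # 0 at the edge, 255 at the center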
import argparse
import copy
def generate_neighbours(path):
    dict_of_neighbours = {}

    with open(path) as f:
        for line in f:
            if line.split()[0] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[1], line.split()[2]])
                dict_of_neighbours[line.split()[0]] = _list
            else:
                dict_of_neighbours[line.split()[0]].append(
                    [line.split()[1], line.split()[2]]
                )
            if line.split()[1] not in dict_of_neighbours:
                _list = []
                _list.append([line.split()[0], line.split()[2]])
                dict_of_neighbours[line.split()[1]] = _list
            else:
                dict_of_neighbours[line.split()[1]].append(
                    [line.split()[0], line.split()[2]]
                )

    return dict_of_neighbours
def generate_first_solution(path, dict_of_neighbours):
    with open(path) as f:
        start_node = f.read(1)
    end_node = start_node

    first_solution = []
    visiting = start_node

    distance_of_first_solution = 0
    while visiting not in first_solution:
        minim = 10000
        for k in dict_of_neighbours[visiting]:
            if int(k[1]) < int(minim) and k[0] not in first_solution:
                minim = k[1]
                best_node = k[0]

        first_solution.append(visiting)
        distance_of_first_solution = distance_of_first_solution + int(minim)
        visiting = best_node

    first_solution.append(end_node)

    position = 0
    for k in dict_of_neighbours[first_solution[-2]]:
        if k[0] == start_node:
            break
        position += 1

    distance_of_first_solution = (
        distance_of_first_solution
        + int(dict_of_neighbours[first_solution[-2]][position][1])
        - 10000
    )
    return first_solution, distance_of_first_solution
def find_neighborhood(solution, dict_of_neighbours):
    neighborhood_of_solution = []

    for n in solution[1:-1]:
        idx1 = solution.index(n)
        for kn in solution[1:-1]:
            idx2 = solution.index(kn)
            if n == kn:
                continue

            _tmp = copy.deepcopy(solution)
            _tmp[idx1] = kn
            _tmp[idx2] = n

            distance = 0
            for k in _tmp[:-1]:
                next_node = _tmp[_tmp.index(k) + 1]
                for i in dict_of_neighbours[k]:
                    if i[0] == next_node:
                        distance = distance + int(i[1])
            _tmp.append(distance)

            if _tmp not in neighborhood_of_solution:
                neighborhood_of_solution.append(_tmp)

    index_of_last_item_in_the_list = len(neighborhood_of_solution[0]) - 1

    neighborhood_of_solution.sort(key=lambda x: x[index_of_last_item_in_the_list])
    return neighborhood_of_solution
def tabu_search(first_solution, distance_of_first_solution, dict_of_neighbours, iters, size):
    count = 1
    solution = first_solution
    tabu_list = []
    best_cost = distance_of_first_solution
    best_solution_ever = solution

    while count <= iters:
        neighborhood = find_neighborhood(solution, dict_of_neighbours)
        index_of_best_solution = 0
        best_solution = neighborhood[index_of_best_solution]
        best_cost_index = len(best_solution) - 1

        found = False
        while not found:
            i = 0
            while i < len(best_solution):
                if best_solution[i] != solution[i]:
                    first_exchange_node = best_solution[i]
                    second_exchange_node = solution[i]
                    break
                i = i + 1

            if [first_exchange_node, second_exchange_node] not in tabu_list and [
                second_exchange_node,
                first_exchange_node,
            ] not in tabu_list:
                tabu_list.append([first_exchange_node, second_exchange_node])
                found = True
                solution = best_solution[:-1]
                cost = neighborhood[index_of_best_solution][best_cost_index]
                if cost < best_cost:
                    best_cost = cost
                    best_solution_ever = solution
            else:
                index_of_best_solution = index_of_best_solution + 1
                best_solution = neighborhood[index_of_best_solution]

        if len(tabu_list) >= size:
            tabu_list.pop(0)

        count = count + 1

    return best_solution_ever, best_cost
def main(args=None):
    dict_of_neighbours = generate_neighbours(args.File)

    first_solution, distance_of_first_solution = generate_first_solution(
        args.File, dict_of_neighbours
    )

    best_sol, best_cost = tabu_search(
        first_solution,
        distance_of_first_solution,
        dict_of_neighbours,
        args.Iterations,
        args.Size,
    )

    print(f"""Best solution: {best_sol}, with total distance: {best_cost}.""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Tabu Search")
parser.add_argument(
'''-f''',
'''--File''',
type=str,
help='''Path to the file containing the data''',
required=True,
)
parser.add_argument(
'''-i''',
'''--Iterations''',
type=int,
help='''How many iterations the algorithm should perform''',
required=True,
)
parser.add_argument(
'''-s''', '''--Size''', type=int, help='''Size of the tabu list''', required=True
)
# Pass the arguments to main method
main(parser.parse_args())
| 283 |
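# Hedged mini-run of the helpers above on a 3-node instance (functions assumed
# in scope). The expected input format is one "node_a node_b distance" edge per
# line, and the greedy start node is the file's first character.
edges = "a b 20\na c 18\nb c 10\n"
with open("tsp_data.txt", "w") as f:
    f.write(edges)

neighbours = generate_neighbours("tsp_data.txt")
print(neighbours["a"])  # [['b', '20'], ['c', '18']]

first_solution, distance = generate_first_solution("tsp_data.txt", neighbours)
print(first_solution, distance)  # ['a', 'c', 'b', 'a'] 48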
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 283 | 1 |
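# Hedged usage of the two configuration classes defined above (assumed in
# scope): build a wider variant and inspect the ONNX export settings.
config = MobileNetV2Config(depth_multiplier=1.4)
onnx_config = MobileNetV2OnnxConfig(config)
print(list(onnx_config.inputs))         # ['pixel_values']
print(onnx_config.atol_for_validation)  # 0.0001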
import unittest
from .lib import (
Matrix,
Vector,
axpy,
square_zero_matrix,
unit_basis_vector,
zero_vector,
)
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = Vector([1, 2, 3] )
self.assertEqual(x.component(0 ) , 1 )
self.assertEqual(x.component(2 ) , 3 )
lowerCamelCase : int = Vector()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = Vector([0, 0, 0, 0, 0, 1] )
self.assertEqual(str(__A ) , "(0,0,0,0,0,1)" )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = Vector([1, 2, 3, 4] )
self.assertEqual(len(__A ) , 4 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = Vector([1, 2] )
lowerCamelCase : Dict = Vector([1, 2, 3, 4, 5] )
lowerCamelCase : str = Vector([0, 0, 0, 0, 0, 0, 0, 0, 0, 0] )
lowerCamelCase : List[str] = Vector([1, -1, 1, -1, 2, -3, 4, -5] )
self.assertAlmostEqual(x.euclidean_length() , 2.236 , 3 )
self.assertAlmostEqual(y.euclidean_length() , 7.416 , 3 )
self.assertEqual(z.euclidean_length() , 0 )
self.assertAlmostEqual(w.euclidean_length() , 7.616 , 3 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = Vector([1, 2, 3] )
lowerCamelCase : Dict = Vector([1, 1, 1] )
self.assertEqual((x + y).component(0 ) , 2 )
self.assertEqual((x + y).component(1 ) , 3 )
self.assertEqual((x + y).component(2 ) , 4 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = Vector([1, 2, 3] )
lowerCamelCase : Optional[int] = Vector([1, 1, 1] )
self.assertEqual((x - y).component(0 ) , 0 )
self.assertEqual((x - y).component(1 ) , 1 )
self.assertEqual((x - y).component(2 ) , 2 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : int = Vector([1, 2, 3] )
lowerCamelCase : Union[str, Any] = Vector([2, -1, 4] ) # for test of dot product
lowerCamelCase : Optional[Any] = Vector([1, -2, -1] )
self.assertEqual(str(x * 3.0 ) , "(3.0,6.0,9.0)" )
self.assertEqual((a * b) , 0 )
def _snake_case ( self ):
"""simple docstring"""
self.assertEqual(str(zero_vector(10 ) ).count("0" ) , 10 )
def _snake_case ( self ):
"""simple docstring"""
self.assertEqual(str(unit_basis_vector(3 , 1 ) ) , "(0,1,0)" )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = Vector([1, 2, 3] )
lowerCamelCase : List[str] = Vector([1, 0, 1] )
self.assertEqual(str(axpy(2 , __A , __A ) ) , "(3,4,7)" )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = Vector([1, 0, 0, 0, 0, 0] )
lowerCamelCase : List[str] = x.copy()
self.assertEqual(str(__A ) , str(__A ) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : int = Vector([1, 0, 0] )
x.change_component(0 , 0 )
x.change_component(1 , 1 )
self.assertEqual(str(__A ) , "(0,1,0)" )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual("|1,2,3|\n|2,4,5|\n|6,7,8|\n" , str(__A ) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCamelCase : int = [[-3, -14, -10], [-5, -10, -5], [-2, -1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(minors[x][y] , a.minor(__A , __A ) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCamelCase : int = [[-3, 14, -10], [5, -10, 5], [-2, 1, 0]]
for x in range(a.height() ):
for y in range(a.width() ):
self.assertEqual(cofactors[x][y] , a.cofactor(__A , __A ) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(-5 , a.determinant() )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : int = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]] , 3 , 3 )
lowerCamelCase : int = Vector([1, 2, 3] )
self.assertEqual("(14,32,50)" , str(a * x ) )
self.assertEqual("|2,4,6|\n|8,10,12|\n|14,16,18|\n" , str(a * 2 ) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
a.change_component(0 , 2 , 5 )
self.assertEqual("|1,2,5|\n|2,4,5|\n|6,7,8|\n" , str(__A ) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
self.assertEqual(7 , a.component(2 , 1 ) , 0.01 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : int = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCamelCase : Any = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|2,4,10|\n|4,8,10|\n|12,14,18|\n" , str(a + b ) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = Matrix([[1, 2, 3], [2, 4, 5], [6, 7, 8]] , 3 , 3 )
lowerCamelCase : Optional[int] = Matrix([[1, 2, 7], [2, 4, 5], [6, 7, 10]] , 3 , 3 )
self.assertEqual("|0,0,-4|\n|0,0,0|\n|0,0,-2|\n" , str(a - b ) )
def _snake_case ( self ):
"""simple docstring"""
self.assertEqual(
"|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n|0,0,0,0,0|\n" , str(square_zero_matrix(5 ) ) , )
if __name__ == "__main__":
unittest.main()
| 283 |
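# The axpy exercised in the tests above follows the BLAS convention a*x + y.
# A hedged, list-based restatement (independent of the Vector class) that
# reproduces the tested result:
def axpy_sketch(a, x, y):
    assert len(x) == len(y)
    return [a * xi + yi for xi, yi in zip(x, y)]


print(axpy_sketch(2, [1, 2, 3], [1, 0, 1]))  # [3, 4, 7], matching the test above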
from math import sqrt

import numpy as np
from sympy import symbols

# Coefficient
# Speed of light (m/s)
c = 299792458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Map a velocity to beta = v/c, validating the physical range."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Lorentz factor 1 / sqrt(1 - beta^2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Boost matrix along the x axis for the four-vector (ct, x, y, z)."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event: np.ndarray = None) -> np.ndarray:
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)

    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
| 283 | 1 |
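# Hedged numeric check of transform() above (assumed in scope): a boost at half
# light speed applied to the event t = 1 s, x = 2 m. Note transform() itself
# multiplies event[0] by c.
import numpy as np

event = np.array([1.0, 2.0, 0.0, 0.0])
boosted = transform(0.5 * 299792458, event)
print(boosted[0], boosted[1])  # gamma*(ct - beta*x) and gamma*(x - beta*ct), gamma ~ 1.1547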
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_snake_case = logging.get_logger(__name__)
class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 283 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
def __init__( self , *__A , **__A ):
"""simple docstring"""
warnings.warn(
"The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DPTImageProcessor instead." , __A , )
super().__init__(*__A , **__A )
| 283 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto.configuration_auto import CONFIG_MAPPING
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : List[str] = "upernet"
def __init__( self , __A=None , __A=512 , __A=0.02 , __A=[1, 2, 3, 6] , __A=True , __A=0.4 , __A=384 , __A=256 , __A=1 , __A=False , __A=255 , **__A , ):
"""simple docstring"""
super().__init__(**__A )
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
lowerCamelCase : Dict = CONFIG_MAPPING["resnet"](out_features=["stage1", "stage2", "stage3", "stage4"] )
elif isinstance(__A , __A ):
lowerCamelCase : Tuple = backbone_config.get("model_type" )
lowerCamelCase : List[str] = CONFIG_MAPPING[backbone_model_type]
lowerCamelCase : Tuple = config_class.from_dict(__A )
lowerCamelCase : Any = backbone_config
lowerCamelCase : Optional[Any] = hidden_size
lowerCamelCase : Optional[Any] = initializer_range
lowerCamelCase : Dict = pool_scales
lowerCamelCase : List[str] = use_auxiliary_head
lowerCamelCase : List[str] = auxiliary_loss_weight
lowerCamelCase : List[Any] = auxiliary_in_channels
lowerCamelCase : str = auxiliary_channels
lowerCamelCase : str = auxiliary_num_convs
lowerCamelCase : Optional[int] = auxiliary_concat_input
lowerCamelCase : str = loss_ignore_index
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = copy.deepcopy(self.__dict__ )
lowerCamelCase : Union[str, Any] = self.backbone_config.to_dict()
lowerCamelCase : Any = self.__class__.model_type
return output
| 283 |
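# Usage sketch, assuming the class above is transformers' UperNetConfig:
# with `backbone_config=None` a default ResNet backbone config is built, and
# `to_dict` serialises the nested backbone config next to the head options.
#
#   from transformers import UperNetConfig  # assumed import path
#   config = UperNetConfig()
#   d = config.to_dict()
#   d["model_type"]                     # "upernet"
#   d["backbone_config"]["model_type"]  # "resnet"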
import argparse
_snake_case = '''docs/source/_static/js/custom.js'''
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , encoding="utf-8" , newline="\n" ) as f:
lowerCamelCase : List[str] = f.readlines()
lowerCamelCase : int = 0
# First let's put the right version
while not lines[index].startswith("const stableVersion =" ):
index += 1
lowerCamelCase : str = f"""const stableVersion = \"v{version}\"\n"""
# Then update the dictionary
while not lines[index].startswith("const versionMapping = {" ):
index += 1
# We go until the end
while not lines[index].startswith("}" ):
index += 1
# We add the new version at the end
lines[index - 1] += f""" \"v{version}\": \"v{version}\",\n"""
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
_snake_case = parser.parse_args()
update_custom_js(args.version)
| 283 | 1 |
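# A self-contained sketch of the rewrite performed by `update_custom_js`
# above, run on hypothetical in-memory lines instead of the real docs file
# (the sample contents below are illustrative, not taken from the repo).
sample = [
    'const stableVersion = "v4.27.0"\n',
    "const versionMapping = {\n",
    '    "": "doc",\n',
    '    "v4.27.0": "v4.27.0",\n',
    "}\n",
]
new_version = "4.28.0"
idx = 0
while not sample[idx].startswith("const stableVersion ="):
    idx += 1
sample[idx] = f'const stableVersion = "v{new_version}"\n'
while not sample[idx].startswith("const versionMapping = {"):
    idx += 1
while not sample[idx].startswith("}"):
    idx += 1
sample[idx - 1] += f'    "v{new_version}": "v{new_version}",\n'
assert sample[0] == 'const stableVersion = "v4.28.0"\n'
assert '"v4.28.0": "v4.28.0",' in sample[-2]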
from __future__ import annotations
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Dict = str(SCREAMING_SNAKE_CASE_ )
return n == n[::-1]
def lowercase_( SCREAMING_SNAKE_CASE_ = 1000000 ):
'''simple docstring'''
lowerCamelCase : Tuple = 0
for i in range(1 , SCREAMING_SNAKE_CASE_ ):
        if is_palindrome(i ) and is_palindrome(bin(i ).split("b" )[1] ):
total += i
return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 283 |
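# Standalone sanity sketch mirroring the helpers above: 585 is a palindrome
# both in base 10 and in base 2 (0b1001001001), so `solution` counts it.
n = 585
assert str(n) == str(n)[::-1]
assert bin(n).split("b")[1] == bin(n).split("b")[1][::-1]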
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Any = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : str = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : List[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Any = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : int = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
| 283 | 1 |
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
_snake_case = {
'''<''': operator.lt,
'''<=''': operator.le,
'''==''': operator.eq,
'''!=''': operator.ne,
'''>=''': operator.ge,
'''>''': operator.gt,
}
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if got_ver is None or want_ver is None:
raise ValueError(
f"""Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"""
f""" reinstalling {pkg}.""" )
if not ops[op](version.parse(SCREAMING_SNAKE_CASE_ ) , version.parse(SCREAMING_SNAKE_CASE_ ) ):
raise ImportError(
f"""{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}""" )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
lowerCamelCase : Union[str, Any] = f"""\n{hint}""" if hint is not None else ""
# non-versioned check
if re.match(r"^[\w_\-\d]+$" , SCREAMING_SNAKE_CASE_ ):
lowerCamelCase , lowerCamelCase , lowerCamelCase : Union[str, Any] = requirement, None, None
else:
lowerCamelCase : Any = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)" , SCREAMING_SNAKE_CASE_ )
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
f""" got {requirement}""" )
lowerCamelCase , lowerCamelCase : Tuple = match[0]
lowerCamelCase : Union[str, Any] = want_full.split("," ) # there could be multiple requirements
lowerCamelCase : str = {}
for w in want_range:
            lowerCamelCase : Dict = re.findall(r"^([\s!=<>]{1,2})(.+)" , w )
if not match:
raise ValueError(
"requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
f""" but got {requirement}""" )
lowerCamelCase , lowerCamelCase : List[Any] = match[0]
lowerCamelCase : Tuple = want_ver
if op not in ops:
raise ValueError(f"""{requirement}: need one of {list(ops.keys() )}, but got {op}""" )
# special case
if pkg == "python":
lowerCamelCase : Union[str, Any] = ".".join([str(SCREAMING_SNAKE_CASE_ ) for x in sys.version_info[:3]] )
for op, want_ver in wanted.items():
_compare_versions(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return
# check if any version is installed
try:
lowerCamelCase : Tuple = importlib.metadata.version(SCREAMING_SNAKE_CASE_ )
except importlib.metadata.PackageNotFoundError:
raise importlib.metadata.PackageNotFoundError(
f"""The '{requirement}' distribution was not found and is required by this application. {hint}""" )
# check that the right version is installed if version number or a range was provided
if want_ver is not None:
for op, want_ver in wanted.items():
_compare_versions(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : List[str] = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
return require_version(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
| 283 |
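# Usage sketch, assuming the helpers above are importable as in transformers
# (`transformers.utils.versions`): a bare name only checks installation,
# while operator clauses pin a version range; "python" is special-cased.
#
#   from transformers.utils.versions import require_version
#   require_version("numpy")               # any installed version passes
#   require_version("tokenizers>=0.10.1")  # ImportError if the pin fails
#   require_version("python>=3.8,<4.0")    # checked against sys.version_info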
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Dict = "openai/whisper-base"
__A : str = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__A : Any = "transcriber"
__A : Any = WhisperProcessor
__A : int = WhisperForConditionalGeneration
__A : Any = ["audio"]
__A : List[str] = ["text"]
def _snake_case ( self , __A ):
"""simple docstring"""
return self.pre_processor(__A , return_tensors="pt" ).input_features
def _snake_case ( self , __A ):
"""simple docstring"""
return self.model.generate(inputs=__A )
def _snake_case ( self , __A ):
"""simple docstring"""
return self.pre_processor.batch_decode(__A , skip_special_tokens=__A )[0]
| 283 | 1 |
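# Usage sketch (class name assumed; in transformers this tool is
# SpeechToTextTool): PipelineTool subclasses are callable and chain
# encode -> forward -> decode, so transcription is a single call on a
# hypothetical 16 kHz mono waveform.
#
#   import numpy as np
#   tool = SpeechToTextTool()
#   audio = np.zeros(16_000, dtype=np.float32)  # 1 second of silence
#   text = tool(audio)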
import re
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if len(re.findall("[ATCG]" , SCREAMING_SNAKE_CASE_ ) ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError("Invalid Strand" )
return dna.translate(dna.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 283 |
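# Standalone sketch of the complement table used above: A<->T and C<->G,
# applied position by position.
table = str.maketrans("ATCG", "TAGC")
assert "ATCG".translate(table) == "TAGC"
assert "AAAACCCGGT".translate(table) == "TTTTGGGCCA"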
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if "model" in orig_key:
lowerCamelCase : Dict = orig_key.replace("model." , "" )
if "norm1" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("norm1" , "attention.output.LayerNorm" )
if "norm2" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("norm2" , "output.LayerNorm" )
if "norm" in orig_key:
lowerCamelCase : Optional[Any] = orig_key.replace("norm" , "LayerNorm" )
if "transformer" in orig_key:
lowerCamelCase : int = orig_key.split("." )[0].split("_" )[-1]
lowerCamelCase : Dict = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
lowerCamelCase : List[str] = orig_key.replace("mha.attn" , "attention.self" )
if "mha" in orig_key:
lowerCamelCase : List[Any] = orig_key.replace("mha" , "attention" )
if "W_q" in orig_key:
lowerCamelCase : Optional[int] = orig_key.replace("W_q" , "self.query" )
if "W_k" in orig_key:
lowerCamelCase : List[Any] = orig_key.replace("W_k" , "self.key" )
if "W_v" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("W_v" , "self.value" )
if "ff1" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("ff1" , "intermediate.dense" )
if "ff2" in orig_key:
lowerCamelCase : Optional[int] = orig_key.replace("ff2" , "output.dense" )
if "ff" in orig_key:
lowerCamelCase : Optional[int] = orig_key.replace("ff" , "output.dense" )
if "mlm_class" in orig_key:
lowerCamelCase : Dict = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" )
if "mlm" in orig_key:
lowerCamelCase : List[Any] = orig_key.replace("mlm" , "cls.predictions.transform" )
if "cls" not in orig_key:
lowerCamelCase : int = "yoso." + orig_key
return orig_key
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
        lowerCamelCase : List[str] = orig_state_dict.pop(key )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
lowerCamelCase : Dict = val
lowerCamelCase : Dict = orig_state_dict["cls.predictions.decoder.bias"]
lowerCamelCase : Dict = torch.arange(SCREAMING_SNAKE_CASE_ ).expand((1, -1) ) + 2
return orig_state_dict
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["model_state_dict"]
lowerCamelCase : List[str] = YosoConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Any = YosoForMaskedLM(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[Any] = convert_checkpoint_helper(config.max_position_embeddings , SCREAMING_SNAKE_CASE_ )
print(model.load_state_dict(SCREAMING_SNAKE_CASE_ ) )
model.eval()
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"""Checkpoint successfuly converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_snake_case = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 283 | 1 |
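# Hypothetical invocation of the converter above (the script filename and all
# paths are placeholders, not real files):
#
#   python convert_yoso_checkpoint.py \
#       --pytorch_model_path /path/to/yoso_checkpoint.pt \
#       --config_file /path/to/yoso_config.json \
#       --pytorch_dump_path ./yoso-hf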
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[int] = FileLock(str(tmpdir / "foo.lock" ) )
lowerCamelCase : Optional[int] = FileLock(str(tmpdir / "foo.lock" ) )
lowerCamelCase : Union[str, Any] = 0.01
with locka.acquire():
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : int = time.time()
locka.acquire(SCREAMING_SNAKE_CASE_ )
assert time.time() - _start > timeout
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = "a" * 1000 + ".lock"
lowerCamelCase : Optional[Any] = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(SCREAMING_SNAKE_CASE_ )
assert len(os.path.basename(locka._lock_file ) ) <= 255
lowerCamelCase : Any = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(SCREAMING_SNAKE_CASE_ ):
locka.acquire(0 )
| 283 |
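# Sketch of the Timeout behaviour the first test above exercises: a second
# FileLock on the same path cannot be acquired while the first one holds it,
# and `acquire(timeout)` raises Timeout once the wait expires ("demo.lock"
# is a hypothetical path).
#
#   from datasets.utils.filelock import FileLock, Timeout
#   lock_a = FileLock("demo.lock")
#   lock_b = FileLock("demo.lock")
#   with lock_a.acquire():
#       try:
#           lock_b.acquire(timeout=0.01)
#       except Timeout:
#           print("lock is held elsewhere")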
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = "M-CLIP"
def __init__( self , __A=1024 , __A=768 , **__A ):
"""simple docstring"""
lowerCamelCase : str = transformerDimSize
lowerCamelCase : Any = imageDimSize
super().__init__(**__A )
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Tuple = MCLIPConfig
def __init__( self , __A , *__A , **__A ):
"""simple docstring"""
super().__init__(__A , *__A , **__A )
lowerCamelCase : Tuple = XLMRobertaModel(__A )
lowerCamelCase : Optional[Any] = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def _snake_case ( self , __A , __A ):
"""simple docstring"""
lowerCamelCase : Any = self.transformer(input_ids=__A , attention_mask=__A )[0]
lowerCamelCase : int = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(__A ), embs
| 283 | 1 |
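# Standalone sketch of the masked mean pooling used in the forward pass
# above: padded positions are zeroed by the attention mask before summing,
# so each sentence embedding averages only over real tokens.
import torch

embs = torch.ones(1, 4, 3)           # (batch, seq_len, dim) toy embeddings
mask = torch.tensor([[1, 1, 0, 0]])  # last two positions are padding
pooled = (embs * mask.unsqueeze(2)).sum(dim=1) / mask.sum(dim=1)[:, None]
assert torch.allclose(pooled, torch.ones(1, 3))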
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class UpperCAmelCase_ :
'''simple docstring'''
__A : str = PegasusConfig
__A : Optional[Any] = {}
__A : Union[str, Any] = "gelu"
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=False , __A=99 , __A=32 , __A=2 , __A=4 , __A=37 , __A=0.1 , __A=0.1 , __A=40 , __A=2 , __A=1 , __A=0 , ):
"""simple docstring"""
lowerCamelCase : Any = parent
lowerCamelCase : List[str] = batch_size
lowerCamelCase : List[Any] = seq_length
lowerCamelCase : Any = is_training
lowerCamelCase : Optional[int] = use_labels
lowerCamelCase : Optional[int] = vocab_size
lowerCamelCase : Any = hidden_size
lowerCamelCase : List[Any] = num_hidden_layers
lowerCamelCase : Tuple = num_attention_heads
lowerCamelCase : Optional[int] = intermediate_size
lowerCamelCase : List[str] = hidden_dropout_prob
lowerCamelCase : str = attention_probs_dropout_prob
lowerCamelCase : Tuple = max_position_embeddings
lowerCamelCase : List[str] = eos_token_id
lowerCamelCase : Optional[int] = pad_token_id
lowerCamelCase : Tuple = bos_token_id
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
lowerCamelCase : Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
lowerCamelCase : Optional[Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : int = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
lowerCamelCase : str = prepare_pegasus_inputs_dict(__A , __A , __A )
return config, inputs_dict
def _snake_case ( self , __A , __A ):
"""simple docstring"""
lowerCamelCase : Any = TFPegasusModel(config=__A ).get_decoder()
lowerCamelCase : Tuple = inputs_dict["input_ids"]
lowerCamelCase : Union[str, Any] = input_ids[:1, :]
lowerCamelCase : str = inputs_dict["attention_mask"][:1, :]
lowerCamelCase : Tuple = inputs_dict["head_mask"]
lowerCamelCase : str = 1
# first forward pass
lowerCamelCase : Optional[Any] = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
lowerCamelCase , lowerCamelCase : Union[str, Any] = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
lowerCamelCase : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
lowerCamelCase : Optional[int] = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
lowerCamelCase : List[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
lowerCamelCase : List[str] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
lowerCamelCase : Any = model(__A , attention_mask=__A )[0]
lowerCamelCase : List[str] = model(__A , attention_mask=__A , past_key_values=__A )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
lowerCamelCase : Optional[Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
lowerCamelCase : Tuple = output_from_no_past[:, -3:, random_slice_idx]
lowerCamelCase : str = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__A , __A , rtol=1e-3 )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None , ):
'''simple docstring'''
if attention_mask is None:
lowerCamelCase : Tuple = tf.cast(tf.math.not_equal(SCREAMING_SNAKE_CASE_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
lowerCamelCase : str = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
lowerCamelCase : List[str] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
lowerCamelCase : List[str] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
lowerCamelCase : Dict = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : Optional[int] = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
__A : Optional[int] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
__A : Any = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
__A : str = True
__A : str = False
__A : Optional[Any] = False
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = TFPegasusModelTester(self )
lowerCamelCase : Union[str, Any] = ConfigTester(self , config_class=__A )
def _snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__A )
@require_sentencepiece
@require_tokenizers
@require_tf
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
__A : List[str] = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
__A : str = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__A : Optional[Any] = "google/pegasus-xsum"
@cached_property
def _snake_case ( self ):
"""simple docstring"""
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _snake_case ( self , **__A ):
"""simple docstring"""
lowerCamelCase : Dict = self.translate_src_text(**__A )
assert self.expected_text == generated_words
def _snake_case ( self , **__A ):
"""simple docstring"""
lowerCamelCase : int = self.tokenizer(self.src_text , **__A , padding=__A , return_tensors="tf" )
lowerCamelCase : List[str] = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__A , )
lowerCamelCase : List[Any] = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__A )
return generated_words
@slow
def _snake_case ( self ):
"""simple docstring"""
self._assert_generated_batch_equal_expected()
| 283 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
lowerCamelCase : Any = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCamelCase : List[Any] = dict(zip(__A , range(len(__A ) ) ) )
lowerCamelCase : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCamelCase : Optional[Any] = {"unk_token": "<unk>"}
lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
lowerCamelCase : str = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
lowerCamelCase : str = os.path.join(self.tmpdirname , __A )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__A , __A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__A )
def _snake_case ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
        lowerCamelCase : Tuple = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_tokenizer()
lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase : Tuple = self.get_image_processor()
lowerCamelCase : List[Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__A )
lowerCamelCase : Optional[int] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __A )
self.assertIsInstance(processor_fast.tokenizer , __A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __A )
self.assertIsInstance(processor_fast.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCamelCase : List[str] = self.get_image_processor(do_normalize=__A )
lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Tuple = self.prepare_image_inputs()
lowerCamelCase : int = image_processor(__A , return_tensors="np" )
lowerCamelCase : Union[str, Any] = processor(images=__A , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.get_image_processor()
lowerCamelCase : Dict = self.get_tokenizer()
lowerCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Tuple = "lower newer"
lowerCamelCase : Union[str, Any] = processor(text=__A , return_tensors="np" )
lowerCamelCase : List[Any] = tokenizer(__A , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : Any = self.get_tokenizer()
lowerCamelCase : int = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[Any] = "lower newer"
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Any = processor(text=__A , images=__A )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = "google/owlvit-base-patch32"
lowerCamelCase : List[Any] = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : Tuple = ["cat", "nasa badge"]
lowerCamelCase : str = processor(text=__A )
lowerCamelCase : Union[str, Any] = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = "google/owlvit-base-patch32"
lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : Dict = [["cat", "nasa badge"], ["person"]]
lowerCamelCase : int = processor(text=__A )
lowerCamelCase : Tuple = 16
lowerCamelCase : Any = len(__A )
        lowerCamelCase : Optional[Any] = max([len(texts ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = "google/owlvit-base-patch32"
lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : List[Any] = ["cat", "nasa badge"]
lowerCamelCase : Optional[Any] = processor(text=__A )
lowerCamelCase : int = 16
lowerCamelCase : List[str] = inputs["input_ids"]
lowerCamelCase : int = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : List[str] = self.get_tokenizer()
lowerCamelCase : str = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
lowerCamelCase : Any = processor(images=__A , query_images=__A )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase : List[Any] = processor.batch_decode(__A )
lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
| 283 | 1 |
from typing import List
from .keymap import KEYMAP, get_character
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def decorator(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : List[Any] = getattr(SCREAMING_SNAKE_CASE_ , "handle_key" , [] )
handle += [key]
setattr(SCREAMING_SNAKE_CASE_ , "handle_key" , SCREAMING_SNAKE_CASE_ )
return func
return decorator
def lowercase_( *SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
def decorator(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : Any = getattr(SCREAMING_SNAKE_CASE_ , "handle_key" , [] )
handle += keys
setattr(SCREAMING_SNAKE_CASE_ , "handle_key" , SCREAMING_SNAKE_CASE_ )
return func
return decorator
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
def __new__( cls , __A , __A , __A ):
"""simple docstring"""
lowerCamelCase : str = super().__new__(cls , __A , __A , __A )
if not hasattr(__A , "key_handler" ):
setattr(__A , "key_handler" , {} )
setattr(__A , "handle_input" , KeyHandler.handle_input )
for value in attrs.values():
lowerCamelCase : List[Any] = getattr(__A , "handle_key" , [] )
for key in handled_keys:
lowerCamelCase : int = value
return new_cls
@staticmethod
def _snake_case ( cls ):
"""simple docstring"""
lowerCamelCase : Tuple = get_character()
if char != KEYMAP["undefined"]:
            lowerCamelCase : List[Any] = ord(char )
        lowerCamelCase : Tuple = cls.key_handler.get(char )
if handler:
lowerCamelCase : List[Any] = char
return handler(cls )
else:
return None
def lowercase_( cls ):
'''simple docstring'''
return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
| 283 |
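# Sketch of the intended use of the decorators and metaclass above (the
# decorator names `mark` and `mark_multiple` are assumed for the two helpers,
# mirroring accelerate's menu module; `KeyHandler` is named in the file):
# decorated methods register themselves under ord key codes, and
# `handle_input` dispatches the next pressed key to the matching handler.
#
#   class Menu(metaclass=KeyHandler):
#       @mark(ord("j"))
#       def move_down(cls):
#           return "down"
#
#       @mark_multiple(ord("k"), ord("K"))
#       def move_up(cls):
#           return "up"
#
#   menu = Menu()
#   menu.handle_input()  # returns "down" when the user presses "j"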
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : List[Any] = BioGptTokenizer
__A : Optional[int] = False
def _snake_case ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
lowerCamelCase : str = dict(zip(__A , range(len(__A ) ) ) )
lowerCamelCase : Dict = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
lowerCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(__A ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(__A ) )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Dict = "lower newer"
lowerCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = BioGptTokenizer(self.vocab_file , self.merges_file )
lowerCamelCase : Optional[int] = "lower"
lowerCamelCase : Any = ["low", "er</w>"]
lowerCamelCase : List[str] = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
lowerCamelCase : Union[str, Any] = tokens + ["<unk>"]
lowerCamelCase : List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
lowerCamelCase : Optional[int] = tokenizer.encode("sequence builders" , add_special_tokens=__A )
lowerCamelCase : Tuple = tokenizer.encode("multi-sequence build" , add_special_tokens=__A )
lowerCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__A )
lowerCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 283 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
_snake_case = None
_snake_case = logging.get_logger(__name__)
_snake_case = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_snake_case = {
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
_snake_case = {
'''google/fnet-base''': 5_12,
'''google/fnet-large''': 5_12,
}
_snake_case = '''▁'''
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : List[str] = VOCAB_FILES_NAMES
__A : Any = PRETRAINED_VOCAB_FILES_MAP
__A : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__A : Optional[int] = ["input_ids", "token_type_ids"]
__A : Tuple = FNetTokenizer
def __init__( self , __A=None , __A=None , __A=False , __A=True , __A=True , __A="<unk>" , __A="[SEP]" , __A="<pad>" , __A="[CLS]" , __A="[MASK]" , **__A , ):
"""simple docstring"""
lowerCamelCase : List[str] = (
AddedToken(__A , lstrip=__A , rstrip=__A , normalized=__A )
if isinstance(__A , __A )
else mask_token
)
super().__init__(
__A , tokenizer_file=__A , do_lower_case=__A , remove_space=__A , keep_accents=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , **__A , )
lowerCamelCase : Tuple = do_lower_case
lowerCamelCase : str = remove_space
lowerCamelCase : List[str] = keep_accents
lowerCamelCase : Dict = vocab_file
lowerCamelCase : Dict = False if not self.vocab_file else True
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
lowerCamelCase : Tuple = [self.sep_token_id]
lowerCamelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
lowerCamelCase : Dict = [self.sep_token_id]
lowerCamelCase : Optional[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _snake_case ( self , __A , __A = None ):
"""simple docstring"""
if not os.path.isdir(__A ):
logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase : Dict = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ):
copyfile(self.vocab_file , __A )
return (out_vocab_file,)
| 283 |
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
lowerCamelCase : List[Any] = 1
lowerCamelCase : Union[str, Any] = 1
while repunit:
lowerCamelCase : Union[str, Any] = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def lowercase_( SCREAMING_SNAKE_CASE_ = 1000000 ):
'''simple docstring'''
lowerCamelCase : List[str] = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(SCREAMING_SNAKE_CASE_ ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
| 283 | 1 |
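# Standalone worked example of the logic in `least_divisible_repunit` above:
# the remainder recurrence R(k+1) = (10 * R(k) + 1) mod d walks repunit
# remainders without building huge integers; for d = 7 the remainder first
# hits 0 at k = 6, and indeed 111111 = 7 * 15873.
def least_index(divisor: int) -> int:
    remainder, index = 1, 1
    while remainder:
        remainder = (10 * remainder + 1) % divisor
        index += 1
    return index

assert 111_111 % 7 == 0
assert least_index(7) == 6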
import os
import unittest
from transformers.models.bartpho.tokenization_bartpho import VOCAB_FILES_NAMES, BartphoTokenizer
from transformers.testing_utils import get_tests_dir
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir('''fixtures/test_sentencepiece_bpe.model''')
class UpperCAmelCase_ ( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : List[str] = BartphoTokenizer
__A : Union[str, Any] = False
__A : str = True
def _snake_case ( self ):
"""simple docstring"""
super().setUp()
lowerCamelCase : List[str] = ["▁This", "▁is", "▁a", "▁t", "est"]
lowerCamelCase : str = dict(zip(__A , range(len(__A ) ) ) )
lowerCamelCase : int = {"unk_token": "<unk>"}
lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["monolingual_vocab_file"] )
with open(self.monolingual_vocab_file , "w" , encoding="utf-8" ) as fp:
for token in vocab_tokens:
fp.write(F"""{token} {vocab_tokens[token]}\n""" )
lowerCamelCase : List[Any] = BartphoTokenizer(__A , self.monolingual_vocab_file , **self.special_tokens_map )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self , **__A ):
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return BartphoTokenizer.from_pretrained(self.tmpdirname , **__A )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : List[str] = "This is a là test"
lowerCamelCase : Union[str, Any] = "This is a<unk><unk> test"
return input_text, output_text
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = BartphoTokenizer(__A , self.monolingual_vocab_file , **self.special_tokens_map )
lowerCamelCase : Any = "This is a là test"
lowerCamelCase : Union[str, Any] = "▁This ▁is ▁a ▁l à ▁t est".split()
lowerCamelCase : List[Any] = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
lowerCamelCase : List[str] = tokens + [tokenizer.unk_token]
lowerCamelCase : Any = [4, 5, 6, 3, 3, 7, 8, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
| 283 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
lowerCamelCase : Tuple = "backbone." if is_semantic else ""
lowerCamelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", "beit.embeddings.cls_token"),
(f"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
(f"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
(f"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
lowerCamelCase : Optional[Any] = "backbone." if is_semantic else ""
# queries, keys and values
lowerCamelCase : Optional[Any] = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
lowerCamelCase : Optional[Any] = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
lowerCamelCase : Tuple = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
lowerCamelCase : str = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Any = q_bias
lowerCamelCase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : int = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowerCamelCase : Any = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
lowerCamelCase : Any = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
lowerCamelCase : int = gamma_a
lowerCamelCase : Optional[Any] = gamma_a
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = dct.pop(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[Any] = val
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : Optional[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def convert_dit_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub=False ):
    '''Copy/paste/tweak the original DiT weights into the HuggingFace BEiT structure.'''
    # checkpoints fine-tuned on RVL-CDIP are classifiers; all others keep the masked-image LM head
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=True , )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
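# Example invocation (script path and output folder are illustrative):
#   python path/to/this_script.py --pytorch_dump_folder_path ./dit-base
# which downloads the default --checkpoint_url above and saves the converted BEiT-format model locally.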
| 283 | 1 |
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SPIECE_UNDERLINE = '''▁'''
SAMPLE_VOCAB = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class BigBirdTokenizationTest(TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = BigBirdTokenizer
    rust_tokenizer_class = BigBirdTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
def _snake_case ( self ):
"""simple docstring"""
super().setUp()
lowerCamelCase : Tuple = self.tokenizer_class(__A , keep_accents=__A )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = "<s>"
lowerCamelCase : Any = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(__A ) , 1004 )
def _snake_case ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _snake_case ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCamelCase : Any = self.get_tokenizer()
lowerCamelCase : List[str] = self.get_rust_tokenizer()
lowerCamelCase : List[Any] = "I was born in 92000, and this is falsé."
lowerCamelCase : str = tokenizer.tokenize(__A )
lowerCamelCase : Any = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
lowerCamelCase : Optional[Any] = tokenizer.encode(__A , add_special_tokens=__A )
lowerCamelCase : Union[str, Any] = rust_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase : List[Any] = tokenizer.encode(__A )
lowerCamelCase : Optional[int] = rust_tokenizer.encode(__A )
self.assertListEqual(__A , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = BigBirdTokenizer(__A , keep_accents=__A )
lowerCamelCase : int = tokenizer.tokenize("This is a test" )
self.assertListEqual(__A , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__A ) , [285, 46, 10, 170, 382] , )
lowerCamelCase : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCamelCase : Dict = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(
__A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCamelCase : Any = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
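        # Note: "9" and "é" fall outside the 1,000-piece sample vocabulary, so they map to id 0 and
        # decode back as "<unk>" in the round-trip above.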
@cached_property
def _snake_case ( self ):
"""simple docstring"""
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = "Hello World!"
lowerCamelCase : Tuple = [65, 1_8536, 2260, 101, 66]
self.assertListEqual(__A , self.big_tokenizer.encode(__A ) )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
lowerCamelCase : Dict = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(__A , self.big_tokenizer.encode(__A ) )
@require_torch
@slow
def _snake_case ( self ):
"""simple docstring"""
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
lowerCamelCase : List[str] = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCamelCase : str = " ".join(__A )
lowerCamelCase : Union[str, Any] = self.big_tokenizer.encode_plus(__A , return_tensors="pt" , return_token_type_ids=__A )
lowerCamelCase : Optional[int] = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=__A )
lowerCamelCase : List[str] = BigBirdConfig(attention_type="original_full" )
lowerCamelCase : List[str] = BigBirdModel(__A )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__A )
model(**__A )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
lowerCamelCase : int = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = {"input_ids": [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
| 283 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( UpperCamelCase ):
    '''
    Constructs a BridgeTower processor, wrapping a BridgeTower image processor and a RoBERTa
    tokenizer into a single processor that prepares both image and text inputs.
    '''
__A : Dict = ["image_processor", "tokenizer"]
__A : Dict = "BridgeTowerImageProcessor"
__A : Optional[int] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , __A , __A ):
"""simple docstring"""
super().__init__(__A , __A )
def __call__( self , __A , __A = None , __A = True , __A = False , __A = None , __A = None , __A = 0 , __A = None , __A = None , __A = None , __A = False , __A = False , __A = False , __A = False , __A = True , __A = None , **__A , ):
"""simple docstring"""
lowerCamelCase : str = self.tokenizer(
text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
# add pixel_values + pixel_mask
lowerCamelCase : int = self.image_processor(
__A , return_tensors=__A , do_normalize=__A , do_center_crop=__A , **__A )
encoding.update(__A )
return encoding
def _snake_case ( self , *__A , **__A ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self , *__A , **__A ):
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.tokenizer.model_input_names
lowerCamelCase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
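# Minimal usage sketch (checkpoint name illustrative; mirrors the generic ProcessorMixin API):
#   processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#   encoding = processor(images=image, text="a photo of a cat", return_tensors="pt")
# -> merges the tokenizer's input_ids/attention_mask with the image processor's pixel_values (+ pixel_mask)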
| 283 | 1 |
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UnCLIPSchedulerTest(SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ):
        """simple docstring"""
        config = {
"num_train_timesteps": 1000,
"variance_type": "fixed_small_log",
"clip_sample": True,
"clip_sample_range": 1.0,
"prediction_type": "epsilon",
}
        config.update(**kwargs )
return config
def _snake_case ( self ):
"""simple docstring"""
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=__A )
def _snake_case ( self ):
"""simple docstring"""
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=__A )
def _snake_case ( self ):
"""simple docstring"""
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__A )
def _snake_case ( self ):
"""simple docstring"""
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=__A )
def _snake_case ( self ):
"""simple docstring"""
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=__A )
def _snake_case ( self ):
"""simple docstring"""
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=__A , prev_timestep=__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
lowerCamelCase : Tuple = self.get_scheduler_config(variance_type="fixed_small_log" )
lowerCamelCase : List[str] = scheduler_class(**__A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00e-10 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1e-5
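        # Context for the magic numbers: _get_variance returns the reverse-process variance at a given
        # timestep, so it is ~0 (1e-10) at t=0 and approaches 1 by the final timestep t=999.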
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = self.scheduler_classes[0]
lowerCamelCase : int = self.get_scheduler_config(variance_type="learned_range" )
lowerCamelCase : Union[str, Any] = scheduler_class(**__A )
lowerCamelCase : Optional[int] = 0.5
assert scheduler._get_variance(1 , predicted_variance=__A ) - -10.1712790 < 1e-5
assert scheduler._get_variance(487 , predicted_variance=__A ) - -5.7998052 < 1e-5
assert scheduler._get_variance(999 , predicted_variance=__A ) - -0.0010011 < 1e-5
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = self.scheduler_classes[0]
lowerCamelCase : List[str] = self.get_scheduler_config()
lowerCamelCase : Union[str, Any] = scheduler_class(**__A )
lowerCamelCase : Any = scheduler.timesteps
lowerCamelCase : Optional[int] = self.dummy_model()
lowerCamelCase : List[Any] = self.dummy_sample_deter
lowerCamelCase : List[Any] = torch.manual_seed(0 )
for i, t in enumerate(__A ):
# 1. predict noise residual
lowerCamelCase : Optional[Any] = model(__A , __A )
# 2. predict previous mean of sample x_t-1
lowerCamelCase : Optional[Any] = scheduler.step(__A , __A , __A , generator=__A ).prev_sample
lowerCamelCase : List[Any] = pred_prev_sample
lowerCamelCase : str = torch.sum(torch.abs(__A ) )
lowerCamelCase : List[str] = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 252.2682495 ) < 1e-2
assert abs(result_mean.item() - 0.3284743 ) < 1e-3
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.scheduler_classes[0]
lowerCamelCase : int = self.get_scheduler_config()
lowerCamelCase : int = scheduler_class(**__A )
scheduler.set_timesteps(25 )
lowerCamelCase : str = scheduler.timesteps
lowerCamelCase : Optional[int] = self.dummy_model()
lowerCamelCase : List[str] = self.dummy_sample_deter
lowerCamelCase : Dict = torch.manual_seed(0 )
for i, t in enumerate(__A ):
# 1. predict noise residual
lowerCamelCase : Optional[Any] = model(__A , __A )
if i + 1 == timesteps.shape[0]:
lowerCamelCase : Union[str, Any] = None
else:
lowerCamelCase : Union[str, Any] = timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
lowerCamelCase : Any = scheduler.step(
__A , __A , __A , prev_timestep=__A , generator=__A ).prev_sample
lowerCamelCase : List[Any] = pred_prev_sample
lowerCamelCase : List[str] = torch.sum(torch.abs(__A ) )
lowerCamelCase : List[str] = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 258.2044983 ) < 1e-2
assert abs(result_mean.item() - 0.3362038 ) < 1e-3
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self ):
"""simple docstring"""
pass
| 283 |
def cocktail_shaker_sort(unsorted):
    '''Sort a list in place with alternating backward and forward bubble passes.'''
    for i in range(len(unsorted ) - 1 , 0 , -1 ):
        swapped = False
        # backward pass: sink the smallest remaining element toward the front
        for j in range(i , 0 , -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        # forward pass: float the largest remaining element toward the back
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
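# Quick sanity check (illustrative; the interactive entry point is below):
#   cocktail_shaker_sort([4, 5, 2, 1, 2]) -> [1, 2, 2, 4, 5]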
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = input('''Enter numbers separated by a comma:\n''').strip()
_snake_case = [int(item) for item in user_input.split(''',''')]
print(f'''{cocktail_shaker_sort(unsorted) = }''')
| 283 | 1 |
from __future__ import annotations
import queue
class TreeNode:
    '''A binary-tree node holding an integer payload and optional children.'''
    def __init__( self , data ):
        """simple docstring"""
        self.data = data
        self.left: TreeNode | None = None
        self.right: TreeNode | None = None
def build_tree():
    '''Interactively build a binary tree in level order from user input.'''
    print("\n********Press N to stop entering at any point of time********\n" )
    check = input("Enter the value of the root node: " ).strip().lower()
    q: queue.Queue = queue.Queue()
    tree_node = TreeNode(int(check ) )
    q.put(tree_node )
    while not q.empty():
        node_found = q.get()
        msg = f"""Enter the left node of {node_found.data}: """
        check = input(msg ).strip().lower() or "n"
        if check == "n":
            return tree_node
        left_node = TreeNode(int(check ) )
        node_found.left = left_node
        q.put(left_node )
        msg = f"""Enter the right node of {node_found.data}: """
        check = input(msg ).strip().lower() or "n"
        if check == "n":
            return tree_node
        right_node = TreeNode(int(check ) )
        node_found.right = right_node
        q.put(right_node )
    raise
def pre_order(node):
    '''Print a pre-order (root, left, right) traversal.'''
    if not isinstance(node , TreeNode ) or not node:
        return
    print(node.data , end="," )
    pre_order(node.left )
    pre_order(node.right )
def in_order(node):
    '''Print an in-order (left, root, right) traversal.'''
    if not isinstance(node , TreeNode ) or not node:
        return
    in_order(node.left )
    print(node.data , end="," )
    in_order(node.right )
def post_order(node):
    '''Print a post-order (left, right, root) traversal.'''
    if not isinstance(node , TreeNode ) or not node:
        return
    post_order(node.left )
    post_order(node.right )
    print(node.data , end="," )
def level_order(node):
    '''Print a breadth-first (level order) traversal using a FIFO queue.'''
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        node_dequeued = q.get()
        print(node_dequeued.data , end="," )
        if node_dequeued.left:
            q.put(node_dequeued.left )
        if node_dequeued.right:
            q.put(node_dequeued.right )
def level_order_actual(node):
    '''Print the tree level by level, one line per level.'''
    if not isinstance(node , TreeNode ) or not node:
        return
    q: queue.Queue = queue.Queue()
    q.put(node )
    while not q.empty():
        list_ = []
        while not q.empty():
            node_dequeued = q.get()
            print(node_dequeued.data , end="," )
            if node_dequeued.left:
                list_.append(node_dequeued.left )
            if node_dequeued.right:
                list_.append(node_dequeued.right )
        print()
        for node in list_:
            q.put(node )
def pre_order_iter(node):
    '''Print a pre-order traversal iteratively with an explicit stack.'''
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:  # start from root node, find its left child
            print(n.data , end="," )
            stack.append(n )
            n = n.left
        # end of while means current node doesn't have left child
        n = stack.pop()
        # start to traverse its right child
        n = n.right
def in_order_iter(node):
    '''Print an in-order traversal iteratively with an explicit stack.'''
    if not isinstance(node , TreeNode ) or not node:
        return
    stack: list[TreeNode] = []
    n = node
    while n or stack:
        while n:
            stack.append(n )
            n = n.left
        n = stack.pop()
        print(n.data , end="," )
        n = n.right
def post_order_iter(node):
    '''Print a post-order traversal iteratively using two stacks.'''
    if not isinstance(node , TreeNode ) or not node:
        return
    stack1, stack2 = [], []
    n = node
    stack1.append(n )
    while stack1:  # to find the reversed order of post order, store it in stack2
        n = stack1.pop()
        if n.left:
            stack1.append(n.left )
        if n.right:
            stack1.append(n.right )
        stack2.append(n )
    while stack2:  # pop up from stack2 will be the post order
        print(stack2.pop().data , end="," )
def lowercase_( SCREAMING_SNAKE_CASE_ = "" , SCREAMING_SNAKE_CASE_=50 , SCREAMING_SNAKE_CASE_="*" ):
'''simple docstring'''
if not s:
return "\n" + width * char
lowerCamelCase , lowerCamelCase : str = divmod(width - len(SCREAMING_SNAKE_CASE_ ) - 2 , 2 )
return f"""{left * char} {s} {(left + extra) * char}"""
if __name__ == "__main__":
import doctest
doctest.testmod()
print(prompt('''Binary Tree Traversals'''))
_snake_case = build_tree()
print(prompt('''Pre Order Traversal'''))
pre_order(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal'''))
in_order(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal'''))
post_order(node)
print(prompt() + '''\n''')
print(prompt('''Level Order Traversal'''))
level_order(node)
print(prompt() + '''\n''')
print(prompt('''Actual Level Order Traversal'''))
level_order_actual(node)
print('''*''' * 50 + '''\n''')
print(prompt('''Pre Order Traversal - Iteration Version'''))
pre_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''In Order Traversal - Iteration Version'''))
in_order_iter(node)
print(prompt() + '''\n''')
print(prompt('''Post Order Traversal - Iteration Version'''))
post_order_iter(node)
print(prompt())
| 283 |
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : Tuple = StableUnCLIPPipeline
__A : Optional[int] = TEXT_TO_IMAGE_PARAMS
__A : str = TEXT_TO_IMAGE_BATCH_PARAMS
__A : int = TEXT_TO_IMAGE_IMAGE_PARAMS
__A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__A : Union[str, Any] = False
def _snake_case ( self ):
"""simple docstring"""
        embedder_hidden_size = 32
        embedder_projection_dim = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__A , projection_dim=__A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase : List[Any] = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__A , num_layers=1 , )
torch.manual_seed(0 )
lowerCamelCase : Dict = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=__A , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = StableUnCLIPImageNormalizer(embedding_dim=__A )
lowerCamelCase : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowerCamelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowerCamelCase : str = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase : Any = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__A , layers_per_block=1 , upcast_attention=__A , use_linear_projection=__A , )
torch.manual_seed(0 )
lowerCamelCase : int = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=__A , steps_offset=1 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = AutoencoderKL()
lowerCamelCase : Optional[int] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
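        # The dict above wires up both stages of Stable unCLIP: the prior maps text embeddings to CLIP
        # image embeddings, which are normalized and noised before the UNet denoises in latent space.
        # The noise level is embedded and concatenated with the image embedding, which is why the UNet's
        # projection_class_embeddings_input_dim is set to embedder_projection_dim * 2 above.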
def _snake_case ( self , __A , __A=0 ):
"""simple docstring"""
if str(__A ).startswith("mps" ):
lowerCamelCase : Optional[int] = torch.manual_seed(__A )
else:
lowerCamelCase : Optional[Any] = torch.Generator(device=__A ).manual_seed(__A )
lowerCamelCase : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=__A )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
lowerCamelCase : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase : Dict = pipe("anime turle" , generator=__A , output_type="np" )
lowerCamelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__A , __A )
def _snake_case ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
lowerCamelCase : Union[str, Any] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase : Any = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
lowerCamelCase : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 283 | 1 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class MCLIPConfig(XLMRobertaConfig):
    '''Configuration for the multilingual CLIP text tower.'''
    model_type = "M-CLIP"
    def __init__( self , transformerDimSize=1024 , imageDimSize=768 , **kwargs ):
        """simple docstring"""
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs )
class MultilingualCLIP(PreTrainedModel):  # class name as in the original M-CLIP port
    '''XLM-R text encoder followed by a linear projection into CLIP image-embedding space.'''
    config_class = MCLIPConfig
    def __init__( self , config , *args , **kwargs ):
        """simple docstring"""
        super().__init__(config , *args , **kwargs )
        self.transformer = XLMRobertaModel(config )
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions , out_features=config.numDims )
    def forward( self , input_ids , attention_mask ):
        """simple docstring"""
        embs = self.transformer(input_ids=input_ids , attention_mask=attention_mask )[0]
        embs2 = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
        return self.LinearTransformation(embs2 ), embs
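# The forward pass mean-pools the token embeddings under the attention mask, then linearly projects the
# pooled text vector into CLIP's image-embedding space (the raw token embeddings are returned as well).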
| 283 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_squeezebert_fast'''] = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_squeezebert'''] = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 283 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'''configuration_ctrl''': ['''CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CTRLConfig'''],
'''tokenization_ctrl''': ['''CTRLTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_ctrl'''] = [
'''CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CTRLForSequenceClassification''',
'''CTRLLMHeadModel''',
'''CTRLModel''',
'''CTRLPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_ctrl'''] = [
'''TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCTRLForSequenceClassification''',
'''TFCTRLLMHeadModel''',
'''TFCTRLModel''',
'''TFCTRLPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
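# Replacing the module object in sys.modules with a _LazyModule defers the heavy torch/TF imports
# until one of the exported names is actually accessed.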
| 283 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : str = "decision_transformer"
__A : Union[str, Any] = ["past_key_values"]
__A : Optional[int] = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
    def __init__( self , state_dim=17 , act_dim=4 , hidden_size=128 , max_ep_len=4096 , action_tanh=True , vocab_size=1 , n_positions=1024 , n_layer=3 , n_head=1 , n_inner=None , activation_function="relu" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_0256 , eos_token_id=5_0256 , scale_attn_by_inverse_layer_idx=False , reorder_and_upcast_attn=False , **kwargs , ):
        """simple docstring"""
        self.state_dim = state_dim
        self.act_dim = act_dim
        self.hidden_size = hidden_size
        self.max_ep_len = max_ep_len
        self.action_tanh = action_tanh
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.scale_attn_by_inverse_layer_idx = scale_attn_by_inverse_layer_idx
        self.reorder_and_upcast_attn = reorder_and_upcast_attn
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
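    # state_dim/act_dim describe the RL environment's observation and action spaces, while the
    # remaining n_* / *_pdrop fields mirror the GPT-2 backbone that Decision Transformer is built on.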
| 283 | 1 |
import requests
from bs4 import BeautifulSoup
def stock_price(symbol = "AAPL" ):
    '''Scrape the currently displayed quote for `symbol` from Yahoo Finance India.'''
    url = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
    soup = BeautifulSoup(requests.get(url ).text , "html.parser" )
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div" , class_=class_ ).find("span" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(f'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 283 |
def solution(n = 4000000 ):
    '''Return the sum of the even-valued Fibonacci terms that do not exceed `n`.'''
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1] )
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib ) - 1 ):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
if __name__ == "__main__":
print(f'''{solution() = }''')
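# For reference (Project Euler #2): with the default limit of 4,000,000 the even-valued terms are
# 2, 8, 34, 144, ..., 3524578, and solution() returns 4613732.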
| 283 | 1 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
_snake_case = get_logger(__name__)
class UpperCAmelCase_ :
'''simple docstring'''
__A : Tuple = "dummy_data"
__A : Tuple = "datasets"
__A : List[Any] = False
def __init__( self , __A , __A , __A , __A = None , __A = False , __A = True , __A = None , ):
"""simple docstring"""
lowerCamelCase : Optional[int] = 0
lowerCamelCase : Dict = dataset_name
lowerCamelCase : Optional[int] = cache_dir
lowerCamelCase : Union[str, Any] = use_local_dummy_data
lowerCamelCase : Any = config
# download_callbacks take a single url as input
lowerCamelCase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowerCamelCase : Any = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowerCamelCase : str = str(__A )
# to be downloaded
lowerCamelCase : Any = None
lowerCamelCase : Optional[int] = None
@property
def _snake_case ( self ):
"""simple docstring"""
if self._dummy_file is None:
lowerCamelCase : Dict = self.download_dummy_data()
return self._dummy_file
@property
def _snake_case ( self ):
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join("dummy" , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join("dummy" , self.version_name )
@property
def _snake_case ( self ):
"""simple docstring"""
return os.path.join(self.dummy_data_folder , "dummy_data.zip" )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowerCamelCase : List[str] = cached_path(
__A , cache_dir=self.cache_dir , extract_compressed_file=__A , force_extract=__A )
return os.path.join(__A , self.dummy_file_name )
@property
def _snake_case ( self ):
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def _snake_case ( self ):
"""simple docstring"""
if self._bucket_url is None:
lowerCamelCase : Any = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , "/" ) )
return self._bucket_url
@property
def _snake_case ( self ):
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , "/" ).split("/" )[:-1] )
def _snake_case ( self , __A , *__A ):
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowerCamelCase : Tuple = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowerCamelCase : Tuple = self.dummy_file_name
# special case when data_url is a dict
if isinstance(__A , __A ):
return self.create_dummy_data_dict(__A , __A )
elif isinstance(__A , (list, tuple) ):
return self.create_dummy_data_list(__A , __A )
else:
return self.create_dummy_data_single(__A , __A )
def _snake_case ( self , __A , *__A ):
"""simple docstring"""
return self.download_and_extract(__A )
def _snake_case ( self , __A , __A ):
"""simple docstring"""
return self.download_and_extract(__A )
def _snake_case ( self , __A , *__A , **__A ):
"""simple docstring"""
return path
def _snake_case ( self ):
"""simple docstring"""
return {}
def _snake_case ( self , __A , __A ):
"""simple docstring"""
lowerCamelCase : Any = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(__A , __A ):
for single_url in single_urls:
download_callback(__A )
else:
lowerCamelCase : Any = single_urls
download_callback(__A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(__A , __A ):
lowerCamelCase : List[str] = [os.path.join(__A , urllib.parse.quote_plus(Path(__A ).name ) ) for x in single_urls]
else:
lowerCamelCase : Any = single_urls
lowerCamelCase : Optional[int] = os.path.join(__A , urllib.parse.quote_plus(Path(__A ).name ) )
lowerCamelCase : Any = value
# make sure that values are unique
if all(isinstance(__A , __A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowerCamelCase : str = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def _snake_case ( self , __A , __A ):
"""simple docstring"""
lowerCamelCase : str = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowerCamelCase : str = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" , __A ) ) for url in data_url )
lowerCamelCase : Optional[int] = all(
url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowerCamelCase : Optional[int] = [data_url[0]] * len(__A )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(__A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCamelCase : Optional[int] = os.path.join(__A , urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
dummy_data_list.append(__A )
return dummy_data_list
def _snake_case ( self , __A , __A ):
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(__A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowerCamelCase : Optional[Any] = os.path.join(__A , urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
if os.path.exists(__A ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self , __A ):
"""simple docstring"""
def _iter_archive_members(__A ):
# this preserves the order of the members inside the ZIP archive
lowerCamelCase : Optional[Any] = Path(self.dummy_file ).parent
lowerCamelCase : List[Any] = path.relative_to(__A )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowerCamelCase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(__A )
lowerCamelCase : Optional[Any] = Path(__A )
lowerCamelCase : Optional[Any] = _iter_archive_members(__A ) if self.use_local_dummy_data else path.rglob("*" )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith((".", "__") ):
yield file_path.relative_to(__A ).as_posix(), file_path.open("rb" )
def _snake_case ( self , __A ):
"""simple docstring"""
if not isinstance(__A , __A ):
lowerCamelCase : List[Any] = [paths]
for path in paths:
if os.path.isfile(__A ):
if os.path.basename(__A ).startswith((".", "__") ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(__A ):
if os.path.basename(__A ).startswith((".", "__") ):
continue
dirnames.sort()
for filename in sorted(__A ):
if filename.startswith((".", "__") ):
continue
yield os.path.join(__A , __A )
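    # In short: this mock download manager resolves every download/extract request against files
    # packed in a local or hub-hosted dummy_data.zip, so dataset-loading tests never hit the network.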
| 283 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_vision_encoder_decoder'''] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_vision_encoder_decoder'''] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_vision_encoder_decoder'''] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 283 | 1 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Any = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : str = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : List[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Any = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : int = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
| 283 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
args = parser.parse_args()
device = '''cpu'''
prompt = '''a lovely <dicoo> in red dress and hat, in the snowly and brightly night, with many brighly buildings'''
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)
# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_status = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_status)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)
# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {'''generator''': generator}
if args.steps is not None:
    generate_kwargs['''num_inference_steps'''] = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
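# Caveat: ipex.optimize traces with sample_input, so the dummy tensors above must match the UNet's
# runtime shapes for SD v1-style models: batch 2 (classifier-free guidance), 4 latent channels,
# 64x64 latents (512x512 images), and 77x768 CLIP text embeddings.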
| 283 | 1 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(_):
    '''Factory hooked into the argparse defaults to build the `env` command.'''
    return EnvironmentCommand()
class EnvironmentCommand(BaseDiffusersCLICommand ):
'''simple docstring'''
@staticmethod
    def register_subcommand( parser ):
        """simple docstring"""
        download_parser = parser.add_parser("env" )
        download_parser.set_defaults(func=info_command_factory )
    def run( self ):
"""simple docstring"""
        hub_version = huggingface_hub.__version__
        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch
            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = "not installed"
        if is_transformers_available():
            import transformers
            transformers_version = transformers.__version__
        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate
            accelerate_version = accelerate.__version__
        xformers_version = "not installed"
        if is_xformers_available():
            import xformers
            xformers_version = xformers.__version__
        info = {
"`diffusers` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"PyTorch version (GPU?)": F"""{pt_version} ({pt_cuda_available})""",
"Huggingface_hub version": hub_version,
"Transformers version": transformers_version,
"Accelerate version": accelerate_version,
"xFormers version": xformers_version,
"Using GPU in script?": "<fill in>",
"Using distributed or parallel set-up in script?": "<fill in>",
}
print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n" )
        print(self.format_dict(info ) )
return info
@staticmethod
    def format_dict( d ):
"""simple docstring"""
return "\n".join([F"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 283 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase_ :
'''simple docstring'''
    def __init__( self , parent , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_lengths = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.gelu_activation = True
        self.sinusoidal_embeddings = False
        self.causal = False
        self.asm = False
        self.n_langs = 2
        self.vocab_size = 99
        self.n_special = 0
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.summary_type = "last"
        self.use_proj = True
        self.scope = None
        self.bos_token_id = 0
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length], dtype=tf.float32)

        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)

        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2, dtype=tf.float32)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FlaubertConfig(
            vocab_size=self.vocab_size,
            n_special=self.n_special,
            emb_dim=self.hidden_size,
            n_layers=self.num_hidden_layers,
            n_heads=self.num_attention_heads,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            gelu_activation=self.gelu_activation,
            sinusoidal_embeddings=self.sinusoidal_embeddings,
            asm=self.asm,
            causal=self.causal,
            n_langs=self.n_langs,
            max_position_embeddings=self.max_position_embeddings,
            initializer_range=self.initializer_range,
            summary_type=self.summary_type,
            use_proj=self.use_proj,
            bos_token_id=self.bos_token_id,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertModel(config=config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertWithLMHeadModel(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForQuestionAnsweringSimple(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = TFFlaubertForSequenceClassification(config)
        inputs = {"input_ids": input_ids, "lengths": input_lengths}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def create_and_check_flaubert_for_token_classification(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = TFFlaubertForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_flaubert_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = TFFlaubertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFlaubertModel,
            TFFlaubertWithLMHeadModel,
            TFFlaubertForSequenceClassification,
            TFFlaubertForQuestionAnsweringSimple,
            TFFlaubertForTokenClassification,
            TFFlaubertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
        {
            "feature-extraction": TFFlaubertModel,
            "fill-mask": TFFlaubertWithLMHeadModel,
            "question-answering": TFFlaubertForQuestionAnsweringSimple,
            "text-classification": TFFlaubertForSequenceClassification,
            "token-classification": TFFlaubertForTokenClassification,
            "zero-shot": TFFlaubertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def is_pipeline_test_to_skip(
        self, pipeline_test_case_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_case_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True

        return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")

        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"

        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
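# Illustrative (not part of the original file): the integration check above can
# be reproduced standalone; the checkpoint name is taken from the test itself.
#   model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
#   output = model(tf.convert_to_tensor([[0, 158, 735, 2592, 1424, 6727, 82, 1]]))[0]
#   print(output.shape)  # expected: (1, 8, 512)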
| 283 | 1 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class OwlViTProcessorTest(unittest.TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)
    def get_tokenizer(self, **kwargs):
        return CLIPTokenizer.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, pad_token="!", **kwargs)

    def get_image_processor(self, **kwargs):
        return OwlViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Prepare a list of PIL images from random uint8 arrays."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = OwlViTProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = OwlViTProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = OwlViTProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = OwlViTProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, OwlViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, OwlViTImageProcessor)
    def test_save_load_pretrained_additional_features(self):
        processor = OwlViTProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False)

        processor = OwlViTProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, OwlViTImageProcessor)
    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_image_proc = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str, return_tensors="np")
        encoded_tok = tokenizer(input_str, return_tensors="np")

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key][0].tolist(), encoded_processor[key][0].tolist())

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()
    def test_processor_with_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_text = ["cat", "nasa badge"]
        inputs = processor(text=input_text)

        seq_length = 16
        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_with_nested_text_list(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = [["cat", "nasa badge"], ["person"]]
        inputs = processor(text=input_texts)

        seq_length = 16
        batch_size = len(input_texts)
        num_max_text_queries = max([len(texts) for texts in input_texts])

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (batch_size * num_max_text_queries, seq_length))

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_processor_case(self):
        model_name = "google/owlvit-base-patch32"
        processor = OwlViTProcessor.from_pretrained(model_name)

        input_texts = ["cat", "nasa badge"]
        inputs = processor(text=input_texts)

        seq_length = 16
        input_ids = inputs["input_ids"]
        predicted_ids = [
            [49406, 2368, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [49406, 6841, 11301, 49407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
        ]

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask"])
        self.assertEqual(inputs["input_ids"].shape, (2, seq_length))
        self.assertListEqual(list(input_ids[0]), predicted_ids[0])
        self.assertListEqual(list(input_ids[1]), predicted_ids[1])
    def test_processor_case2(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        query_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, query_images=query_input)

        self.assertListEqual(list(inputs.keys()), ["query_pixel_values", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = OwlViTProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 283 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=[]):
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask


def clamp(n, smallest, largest):
    return max(smallest, min(n, largest))


def clamp_rect(rect, min_x_y, max_x_y):
    return (
        clamp(rect[0], min_x_y[0], max_x_y[0]),
        clamp(rect[1], min_x_y[1], max_x_y[1]),
        clamp(rect[2], min_x_y[0], max_x_y[0]),
        clamp(rect[3], min_x_y[1], max_x_y[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect


def squeeze_tile(tile, original_image, original_slice, slice_x):
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result


def unsqueeze_tile(tile, original_image_slice):
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile


def round_down_to_multiple(n, d):
    # Name reconstructed (the original identifier was obfuscated):
    # rounds n down to the nearest multiple of d.
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    def __init__(self, vae, text_encoder, tokenizer, unet, low_res_scheduler, scheduler, max_noise_level=350):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        image,
        num_inference_steps=75,
        guidance_scale=9.0,
        noise_level=50,
        negative_prompt=None,
        num_images_per_prompt=1,
        eta=0.0,
        generator=None,
        latents=None,
        callback=None,
        callback_steps=1,  # parameter names after `callback` are reconstructed; only their defaults survived
        tile_size=128,
        tile_border=32,
        original_image_slice=32,
    ):
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    # Run a demo
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| 283 | 1 |
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            arr = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
            num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output,
        writer_batch_size=writer_batch_size,
        hash_salt="split_name",
        check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value


@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)
    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
| 283 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class MobileNetV2Config(PretrainedConfig):
    model_type = "mobilenet_v2"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index


class MobileNetV2OnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
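# Illustrative usage (not part of the original file; the width-multiplier value
# below is an arbitrary example):
# >>> config = MobileNetV2Config(depth_multiplier=1.4)
# >>> config.model_type, config.image_size
# ('mobilenet_v2', 224)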
| 283 | 1 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs: dict) -> int:
    """Return the number of possible shards according to the input gen_kwargs."""
    # Having lists of different sizes makes sharding ambiguous, raise an error in this case
    lists_lengths = {key: len(value) for key, value in gen_kwargs.items() if isinstance(value, list)}
    if len(set(lists_lengths.values())) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(f"\t- key {key} has length {length}" for key, length in lists_lengths.items())
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            )
        )
    max_length = max(lists_lengths.values(), default=0)
    return max(1, max_length)


def _distribute_shards(num_shards: int, max_num_jobs: int) -> List[range]:
    """Get the ranges of shard indices per job."""
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start, start + num_shards_to_add)
        shards_indices_per_group.append(shard_indices)
    return shards_indices_per_group


def _split_gen_kwargs(gen_kwargs: dict, max_num_jobs: int) -> List[dict]:
    """Split the gen_kwargs into at most `max_num_jobs` gen_kwargs."""
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs)
    if num_shards == 1:
        return [dict(gen_kwargs)]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards, max_num_jobs=max_num_jobs)
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value, list)
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group))
        ]


def _merge_gen_kwargs(gen_kwargs_list: List[dict]) -> dict:
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key], list)
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }


def _shuffle_gen_kwargs(rng: np.random.Generator, gen_kwargs: dict) -> dict:
    """Return a shuffled copy of the input gen_kwargs; lists of the same size share the same shuffling."""
    list_sizes = {len(value) for value in gen_kwargs.values() if isinstance(value, list)}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size))
        rng.shuffle(indices_per_size[size])
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs)
    for key, value in shuffled_kwargs.items():
        if isinstance(value, list):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value)]]
    return shuffled_kwargs
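# Illustrative demo (not part of the original file): 10 shards over at most 3
# jobs are split into contiguous, near-equal ranges.
if __name__ == "__main__":
    groups = _distribute_shards(num_shards=10, max_num_jobs=3)
    print([list(g) for g in groups])  # [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]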
| 283 |
from math import sqrt
import numpy as np
from sympy import symbols
# Speed of light (m/s)
c = 299_792_458

# Symbols
ct, x, y, z = symbols("ct x y z")


def beta(velocity: float) -> float:
    """Calculate beta = velocity / speed of light."""
    if velocity > c:
        raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!")
    elif velocity < 1:
        # Usually the speed should be much higher than 1 (c order of magnitude)
        raise ValueError("Speed must be greater than or equal to 1!")
    return velocity / c


def gamma(velocity: float) -> float:
    """Calculate the Lorentz factor gamma = 1 / sqrt(1 - beta**2)."""
    return 1 / sqrt(1 - beta(velocity) ** 2)


def transformation_matrix(velocity: float) -> np.ndarray:
    """Build the Lorentz boost matrix for a boost along the x axis."""
    return np.array(
        [
            [gamma(velocity), -gamma(velocity) * beta(velocity), 0, 0],
            [-gamma(velocity) * beta(velocity), gamma(velocity), 0, 0],
            [0, 0, 1, 0],
            [0, 0, 0, 1],
        ]
    )


def transform(velocity: float, event=None) -> np.ndarray:
    """Apply the Lorentz transformation to a four-vector (symbolic by default)."""
    if event is None:
        event = np.array([ct, x, y, z])  # Symbolic four vector
    else:
        event[0] *= c  # x0 is ct (speed of light * time)
    return transformation_matrix(velocity) @ event


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Example of symbolic vector:
    four_vector = transform(29979245)
    print("Example of four vector: ")
    print(f"ct' = {four_vector[0]}")
    print(f"x' = {four_vector[1]}")
    print(f"y' = {four_vector[2]}")
    print(f"z' = {four_vector[3]}")

    # Substitute symbols with numerical values
    sub_dict = {ct: c, x: 1, y: 1, z: 1}
    numerical_vector = [four_vector[i].subs(sub_dict) for i in range(4)]
    print(f"\n{numerical_vector}")
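    # Illustrative numeric check (not part of the original file):
    # at v = c/2, gamma = 1/sqrt(1 - 0.25) ≈ 1.1547.
    print(f"gamma(c/2) = {gamma(c / 2):.4f}")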
| 283 | 1 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey
class NearestNeighbour:
    """Simplest and fastest version of image resizing: nearest-neighbour interpolation."""

    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError("Destination width/height should be > 0")

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = np.ones((self.dst_h, self.dst_w, 3), np.uint8) * 255

    def process(self):
        for i in range(self.dst_h):
            for j in range(self.dst_w):
                self.output[i][j] = self.img[self.get_y(i)][self.get_x(j)]

    def get_x(self, x: int) -> int:
        """Map a destination x coordinate back to the source image."""
        return int(self.ratio_x * x)

    def get_y(self, y: int) -> int:
        """Map a destination y coordinate back to the source image."""
        return int(self.ratio_y * y)


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("image_data/lena.jpg", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output
    )
    waitKey(0)
    destroyAllWindows()
| 283 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
logger = logging.get_logger(__name__)


class DPTFeatureExtractor(DPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use DPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 283 | 1 |
def upper(word: str) -> str:
    """
    Convert an entire ASCII string to uppercase.

    >>> upper("wow")
    'WOW'
    """
    return "".join(chr(ord(char) - 32) if "a" <= char <= "z" else char for char in word)


if __name__ == "__main__":
    from doctest import testmod

    testmod()
| 283 |
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"


def update_custom_js(version):
    """Update the version table in the custom.js file."""
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0

    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'

    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1

    # We go until the end
    while not lines[index].startswith("}"):
        index += 1

    # We add the new version at the end
    lines[index - 1] += f'    "v{version}": "v{version}",\n'

    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--version", help="Release version.")
    args = parser.parse_args()
    update_custom_js(args.version)
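# Illustrative effect of the edit above (version numbers are examples only):
#   const stableVersion = "v4.21.0"   becomes   const stableVersion = "v4.22.0"
# and an entry '"v4.22.0": "v4.22.0",' is appended to `versionMapping`.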
| 283 | 1 |
def merge_sort(collection: list) -> list:
    # Despite the name, this is not classic merge sort: it repeatedly pulls the
    # minimum and maximum out of the remaining items and rebuilds the sorted list.
    start, end = [], []
    while len(collection) > 1:
        min_one, max_one = min(collection), max(collection)
        start.append(min_one)
        end.append(max_one)
        collection.remove(min_one)
        collection.remove(max_one)
    end.reverse()
    return start + collection + end


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(*merge_sort(unsorted), sep=",")
| 283 |
from ..utils import DummyObject, requires_backends
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Any = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : str = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : List[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Any = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : int = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
| 283 | 1 |
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = len(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[Any] = sum(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Tuple = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
lowerCamelCase : List[str] = True
for i in range(1 , s + 1 ):
lowerCamelCase : Dict = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
lowerCamelCase : Tuple = dp[i][j - 1]
if arr[i - 1] <= j:
lowerCamelCase : Dict = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j]:
lowerCamelCase : Any = s - 2 * j
break
return diff
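# A de-obfuscated sketch of the same minimum subset-sum-difference DP, with
# names of our choosing (the dump above obfuscates every assignment target).
# dp[i][j] is True when some subset of the first i items sums to exactly j;
# the answer is s - 2*j for the largest reachable j <= s // 2.
def find_min(arr):
    n, s = len(arr), sum(arr)
    dp = [[False] * (s + 1) for _ in range(n + 1)]
    for i in range(n + 1):
        dp[i][0] = True  # the empty subset always sums to 0
    for i in range(1, n + 1):
        for j in range(1, s + 1):
            dp[i][j] = dp[i - 1][j]  # skip arr[i - 1]
            if arr[i - 1] <= j:
                dp[i][j] = dp[i][j] or dp[i - 1][j - arr[i - 1]]  # take arr[i - 1]
    for j in range(s // 2, -1, -1):
        if dp[n][j]:
            return s - 2 * j
    return s


assert find_min([1, 6, 11, 5]) == 1  # {1, 5, 6} = 12 vs {11} = 11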
| 283 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Dict = "openai/whisper-base"
__A : str = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__A : Any = "transcriber"
__A : Any = WhisperProcessor
__A : int = WhisperForConditionalGeneration
__A : Any = ["audio"]
__A : List[str] = ["text"]
def _snake_case ( self , __A ):
"""simple docstring"""
return self.pre_processor(__A , return_tensors="pt" ).input_features
def _snake_case ( self , __A ):
"""simple docstring"""
return self.model.generate(inputs=__A )
def _snake_case ( self , __A ):
"""simple docstring"""
return self.pre_processor.batch_decode(__A , skip_special_tokens=__A )[0]
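# A minimal sketch of the same encode -> generate -> decode pipeline using the
# Whisper classes directly; the dummy one-second 16 kHz waveform is ours, the
# checkpoint id is the one named in the class above.
import numpy as np
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

waveform = np.zeros(16_000, dtype=np.float32)  # one second of silence
features = processor(waveform, sampling_rate=16_000, return_tensors="pt").input_features
generated = model.generate(inputs=features)
text = processor.batch_decode(generated, skip_special_tokens=True)[0]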
| 283 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''google/switch-base-8''': '''https://huggingface.co/google/switch-base-8/blob/main/config.json''',
}
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : int = "switch_transformers"
__A : int = ["past_key_values"]
__A : Dict = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , __A=3_2128 , __A=768 , __A=64 , __A=2048 , __A=64 , __A=12 , __A=3 , __A=12 , __A=3 , __A=12 , __A=8 , __A=False , __A=0.01 , __A="float32" , __A=False , __A=32 , __A=128 , __A=0.1 , __A=1e-6 , __A=0.001 , __A=0.001 , __A=1.0 , __A="relu" , __A=True , __A=False , __A=True , __A=0 , __A=1 , **__A , ):
"""simple docstring"""
lowerCamelCase : str = vocab_size
lowerCamelCase : Any = d_model
lowerCamelCase : int = d_kv
lowerCamelCase : Union[str, Any] = d_ff
lowerCamelCase : str = num_sparse_encoder_layers
lowerCamelCase : List[str] = num_layers
lowerCamelCase : str = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowerCamelCase : Tuple = num_sparse_decoder_layers
# This tells us how often (i.e. every how many encoder layers) a sparse layer is inserted.
if self.num_sparse_encoder_layers > 0:
lowerCamelCase : List[str] = self.num_layers // self.num_sparse_encoder_layers
else:
lowerCamelCase : Union[str, Any] = self.num_layers # HACK: this will create 0 sparse layers
# This tells us how often (i.e. every how many decoder layers) a sparse layer is inserted.
if self.num_sparse_decoder_layers > 0:
lowerCamelCase : Any = self.num_decoder_layers // self.num_sparse_decoder_layers
else:
lowerCamelCase : Tuple = self.num_decoder_layers # HACK: this will create 0 sparse layers
lowerCamelCase : Any = num_heads
lowerCamelCase : List[Any] = num_experts
lowerCamelCase : str = expert_capacity
lowerCamelCase : List[Any] = router_bias
lowerCamelCase : int = router_jitter_noise
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(F"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
lowerCamelCase : Tuple = router_dtype
lowerCamelCase : int = router_ignore_padding_tokens
lowerCamelCase : Union[str, Any] = relative_attention_num_buckets
lowerCamelCase : int = relative_attention_max_distance
lowerCamelCase : Tuple = dropout_rate
lowerCamelCase : List[str] = layer_norm_epsilon
lowerCamelCase : List[Any] = initializer_factor
lowerCamelCase : List[Any] = feed_forward_proj
lowerCamelCase : Optional[int] = use_cache
lowerCamelCase : int = add_router_probs
lowerCamelCase : List[Any] = router_z_loss_coef
lowerCamelCase : Union[str, Any] = router_aux_loss_coef
lowerCamelCase : Optional[int] = self.feed_forward_proj.split("-" )
lowerCamelCase : Tuple = act_info[-1]
lowerCamelCase : Union[str, Any] = act_info[0] == "gated"
if len(__A ) > 1 and act_info[0] != "gated" or len(__A ) > 2:
raise ValueError(
F"""`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. """
"Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
"'gated-gelu' or 'relu'" )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
lowerCamelCase : Any = "gelu_new"
super().__init__(
pad_token_id=__A , eos_token_id=__A , is_encoder_decoder=__A , **__A , )
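# Worked example of the sparse-step arithmetic in __init__ above (the numbers
# are illustrative): num_layers=12 with num_sparse_encoder_layers=3 gives
# encoder_sparse_step = 12 // 3 = 4, i.e. every 4th encoder layer is a sparse
# mixture-of-experts layer. With 0 sparse layers the step is set to num_layers
# itself, so no layer is ever selected as sparse.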
| 283 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if "model" in orig_key:
lowerCamelCase : Dict = orig_key.replace("model." , "" )
if "norm1" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("norm1" , "attention.output.LayerNorm" )
if "norm2" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("norm2" , "output.LayerNorm" )
if "norm" in orig_key:
lowerCamelCase : Optional[Any] = orig_key.replace("norm" , "LayerNorm" )
if "transformer" in orig_key:
lowerCamelCase : int = orig_key.split("." )[0].split("_" )[-1]
lowerCamelCase : Dict = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
if "mha.attn" in orig_key:
lowerCamelCase : List[str] = orig_key.replace("mha.attn" , "attention.self" )
if "mha" in orig_key:
lowerCamelCase : List[Any] = orig_key.replace("mha" , "attention" )
if "W_q" in orig_key:
lowerCamelCase : Optional[int] = orig_key.replace("W_q" , "self.query" )
if "W_k" in orig_key:
lowerCamelCase : List[Any] = orig_key.replace("W_k" , "self.key" )
if "W_v" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("W_v" , "self.value" )
if "ff1" in orig_key:
lowerCamelCase : Union[str, Any] = orig_key.replace("ff1" , "intermediate.dense" )
if "ff2" in orig_key:
lowerCamelCase : Optional[int] = orig_key.replace("ff2" , "output.dense" )
if "ff" in orig_key:
lowerCamelCase : Optional[int] = orig_key.replace("ff" , "output.dense" )
if "mlm_class" in orig_key:
lowerCamelCase : Dict = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" )
if "mlm" in orig_key:
lowerCamelCase : List[Any] = orig_key.replace("mlm" , "cls.predictions.transform" )
if "cls" not in orig_key:
lowerCamelCase : int = "yoso." + orig_key
return orig_key
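# Illustrative trace of the renaming above on one hypothetical checkpoint key
# (the key itself is our assumption, not taken from a real YOSO checkpoint):
#   "model.transformer_3.mha.W_q.weight"
#     -> strip "model."    -> "transformer_3.mha.W_q.weight"
#     -> "transformer_3"   -> "encoder.layer.3.mha.W_q.weight"
#     -> "mha"             -> "encoder.layer.3.attention.W_q.weight"
#     -> "W_q"             -> "encoder.layer.3.attention.self.query.weight"
#     -> prefix "yoso."    -> "yoso.encoder.layer.3.attention.self.query.weight"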
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCamelCase : List[str] = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if ("pooler" in key) or ("sen_class" in key):
continue
else:
lowerCamelCase : Dict = val
lowerCamelCase : Dict = orig_state_dict["cls.predictions.decoder.bias"]
lowerCamelCase : Dict = torch.arange(SCREAMING_SNAKE_CASE_ ).expand((1, -1) ) + 2
return orig_state_dict
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["model_state_dict"]
lowerCamelCase : List[str] = YosoConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Any = YosoForMaskedLM(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[Any] = convert_checkpoint_helper(config.max_position_embeddings , SCREAMING_SNAKE_CASE_ )
print(model.load_state_dict(SCREAMING_SNAKE_CASE_ ) )
model.eval()
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''', default=None, type=str, required=True, help='''Path to YOSO pytorch checkpoint.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for YOSO model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_snake_case = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 283 | 1 |
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return (data["data"], data["target"])
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : List[Any] = XGBRegressor(verbosity=0 , random_state=42 )
xgb.fit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Predict target for test data
lowerCamelCase : List[str] = xgb.predict(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : str = predictions.reshape(len(SCREAMING_SNAKE_CASE_ ) , 1 )
return predictions
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : Any = fetch_california_housing()
lowerCamelCase , lowerCamelCase : List[Any] = data_handling(SCREAMING_SNAKE_CASE_ )
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Any = train_test_split(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , test_size=0.25 , random_state=1 )
lowerCamelCase : List[str] = xgboost(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Error printing
print(f"""Mean Absolute Error : {mean_absolute_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" )
print(f"""Mean Square Error : {mean_squared_error(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}""" )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
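# A de-obfuscated, runnable sketch of the pipeline above (the variable names
# are ours): load California housing, split, fit an XGBRegressor, report errors.
from sklearn.datasets import fetch_california_housing
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.model_selection import train_test_split
from xgboost import XGBRegressor

housing = fetch_california_housing()
x_train, x_test, y_train, y_test = train_test_split(
    housing["data"], housing["target"], test_size=0.25, random_state=1
)
model = XGBRegressor(verbosity=0, random_state=42)
model.fit(x_train, y_train)
predictions = model.predict(x_test)
print(f"Mean Absolute Error : {mean_absolute_error(y_test, predictions)}")
print(f"Mean Square Error : {mean_squared_error(y_test, predictions)}")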
| 283 |
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = "M-CLIP"
def __init__( self , __A=1024 , __A=768 , **__A ):
"""simple docstring"""
lowerCamelCase : str = transformerDimSize
lowerCamelCase : Any = imageDimSize
super().__init__(**__A )
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Tuple = MCLIPConfig
def __init__( self , __A , *__A , **__A ):
"""simple docstring"""
super().__init__(__A , *__A , **__A )
lowerCamelCase : Tuple = XLMRobertaModel(__A )
lowerCamelCase : Optional[Any] = torch.nn.Linear(
in_features=config.transformerDimensions , out_features=config.numDims )
def _snake_case ( self , __A , __A ):
"""simple docstring"""
lowerCamelCase : Any = self.transformer(input_ids=__A , attention_mask=__A )[0]
lowerCamelCase : int = (embs * attention_mask.unsqueeze(2 )).sum(dim=1 ) / attention_mask.sum(dim=1 )[:, None]
return self.LinearTransformation(__A ), embs
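# The forward pass above mean-pools token embeddings under the attention mask.
# A standalone sketch of just that pooling step (the shapes are ours):
import torch

embs = torch.randn(2, 5, 8)  # (batch, seq, dim)
attention_mask = torch.tensor([[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]])
pooled = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
assert pooled.shape == (2, 8)  # padding positions contribute nothing to the mean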
| 283 | 1 |
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __A ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = set_counts
lowerCamelCase : List[str] = max(__A )
lowerCamelCase : Optional[Any] = len(__A )
lowerCamelCase : int = [1] * num_sets
lowerCamelCase : Tuple = list(range(__A ) )
def _snake_case ( self , __A , __A ):
"""simple docstring"""
lowerCamelCase : Dict = self.get_parent(__A )
lowerCamelCase : int = self.get_parent(__A )
if src_parent == dst_parent:
return False
if self.ranks[dst_parent] >= self.ranks[src_parent]:
self.set_counts[dst_parent] += self.set_counts[src_parent]
lowerCamelCase : List[str] = 0
lowerCamelCase : Tuple = dst_parent
if self.ranks[dst_parent] == self.ranks[src_parent]:
self.ranks[dst_parent] += 1
lowerCamelCase : Tuple = self.set_counts[dst_parent]
else:
self.set_counts[src_parent] += self.set_counts[dst_parent]
lowerCamelCase : List[str] = 0
lowerCamelCase : Optional[int] = src_parent
lowerCamelCase : int = self.set_counts[src_parent]
lowerCamelCase : Dict = max(self.max_set , __A )
return True
def _snake_case ( self , __A ):
"""simple docstring"""
if self.parents[disj_set] == disj_set:
return disj_set
lowerCamelCase : Union[str, Any] = self.get_parent(self.parents[disj_set] )
return self.parents[disj_set]
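# A minimal standalone union-find sketch for comparison (names are ours; the
# class above additionally tracks per-set element counts and the largest set):
def find(parent, x):
    while parent[x] != x:
        parent[x] = parent[parent[x]]  # path halving
        x = parent[x]
    return x


def union(parent, rank, a, b):
    ra, rb = find(parent, a), find(parent, b)
    if ra == rb:
        return False
    if rank[ra] < rank[rb]:
        ra, rb = rb, ra
    parent[rb] = ra  # attach the lower-rank root under the higher-rank one
    if rank[ra] == rank[rb]:
        rank[ra] += 1
    return True


parent, rank = list(range(4)), [0] * 4
union(parent, rank, 0, 1)
union(parent, rank, 2, 3)
union(parent, rank, 1, 3)
assert find(parent, 0) == find(parent, 2)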
| 283 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = tempfile.mkdtemp()
# fmt: off
lowerCamelCase : Any = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
lowerCamelCase : List[Any] = dict(zip(__A , range(len(__A ) ) ) )
lowerCamelCase : List[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
lowerCamelCase : Optional[Any] = {"unk_token": "<unk>"}
lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__A ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__A ) )
lowerCamelCase : str = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48145466, 0.4578275, 0.40821073],
"image_std": [0.26862954, 0.26130258, 0.27577711],
}
lowerCamelCase : str = os.path.join(self.tmpdirname , __A )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(__A , __A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **__A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **__A )
def _snake_case ( self , **__A ):
"""simple docstring"""
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **__A )
def _snake_case ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase : Tuple = [Image.fromarray(np.moveaxis(__A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_tokenizer()
lowerCamelCase : Optional[Any] = self.get_rust_tokenizer()
lowerCamelCase : Tuple = self.get_image_processor()
lowerCamelCase : List[Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_slow.save_pretrained(self.tmpdirname )
lowerCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=__A )
lowerCamelCase : Optional[int] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
processor_fast.save_pretrained(self.tmpdirname )
lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , __A )
self.assertIsInstance(processor_fast.tokenizer , __A )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , __A )
self.assertIsInstance(processor_fast.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
lowerCamelCase : List[str] = self.get_image_processor(do_normalize=__A )
lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=__A )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __A )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Tuple = self.prepare_image_inputs()
lowerCamelCase : int = image_processor(__A , return_tensors="np" )
lowerCamelCase : Union[str, Any] = processor(images=__A , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.get_image_processor()
lowerCamelCase : Dict = self.get_tokenizer()
lowerCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Tuple = "lower newer"
lowerCamelCase : Union[str, Any] = processor(text=__A , return_tensors="np" )
lowerCamelCase : List[Any] = tokenizer(__A , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : Any = self.get_tokenizer()
lowerCamelCase : int = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[Any] = "lower newer"
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Any = processor(text=__A , images=__A )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = "google/owlvit-base-patch32"
lowerCamelCase : List[Any] = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : Tuple = ["cat", "nasa badge"]
lowerCamelCase : str = processor(text=__A )
lowerCamelCase : Union[str, Any] = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = "google/owlvit-base-patch32"
lowerCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : Dict = [["cat", "nasa badge"], ["person"]]
lowerCamelCase : int = processor(text=__A )
lowerCamelCase : Tuple = 16
lowerCamelCase : Any = len(__A )
lowerCamelCase : Optional[Any] = max([len(__A ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = "google/owlvit-base-patch32"
lowerCamelCase : Tuple = OwlViTProcessor.from_pretrained(__A )
lowerCamelCase : List[Any] = ["cat", "nasa badge"]
lowerCamelCase : Optional[Any] = processor(text=__A )
lowerCamelCase : int = 16
lowerCamelCase : List[str] = inputs["input_ids"]
lowerCamelCase : int = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = self.get_image_processor()
lowerCamelCase : List[str] = self.get_tokenizer()
lowerCamelCase : str = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Dict = self.prepare_image_inputs()
lowerCamelCase : Union[str, Any] = self.prepare_image_inputs()
lowerCamelCase : Any = processor(images=__A , query_images=__A )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(__A ):
processor()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.get_image_processor()
lowerCamelCase : Optional[int] = self.get_tokenizer()
lowerCamelCase : Dict = OwlViTProcessor(tokenizer=__A , image_processor=__A )
lowerCamelCase : Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase : List[Any] = processor.batch_decode(__A )
lowerCamelCase : Union[str, Any] = tokenizer.batch_decode(__A )
self.assertListEqual(__A , __A )
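# A minimal usage sketch of the processor under test (the checkpoint id is the
# one the integration tests above load; `image` is a placeholder):
#     from transformers import OwlViTProcessor
#     processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#     inputs = processor(text=[["cat", "nasa badge"]], images=image, return_tensors="np")
#     # -> input_ids / attention_mask padded to 16 tokens, plus pixel_values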
| 283 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_snake_case = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 283 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class UpperCAmelCase_ ( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : List[Any] = BioGptTokenizer
__A : Optional[int] = False
def _snake_case ( self ):
"""simple docstring"""
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase : Union[str, Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
lowerCamelCase : str = dict(zip(__A , range(len(__A ) ) ) )
lowerCamelCase : Dict = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
lowerCamelCase : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCamelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" ) as fp:
fp.write(json.dumps(__A ) )
with open(self.merges_file , "w" ) as fp:
fp.write("\n".join(__A ) )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Dict = "lower newer"
lowerCamelCase : Union[str, Any] = "lower newer"
return input_text, output_text
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = BioGptTokenizer(self.vocab_file , self.merges_file )
lowerCamelCase : Optional[int] = "lower"
lowerCamelCase : Any = ["low", "er</w>"]
lowerCamelCase : List[str] = tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
lowerCamelCase : Union[str, Any] = tokens + ["<unk>"]
lowerCamelCase : List[str] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__A ) , __A )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = BioGptTokenizer.from_pretrained("microsoft/biogpt" )
lowerCamelCase : Optional[int] = tokenizer.encode("sequence builders" , add_special_tokens=__A )
lowerCamelCase : Tuple = tokenizer.encode("multi-sequence build" , add_special_tokens=__A )
lowerCamelCase : Tuple = tokenizer.build_inputs_with_special_tokens(__A )
lowerCamelCase : List[str] = tokenizer.build_inputs_with_special_tokens(__A , __A )
self.assertTrue(encoded_sentence == [2] + text )
self.assertTrue(encoded_pair == [2] + text + [2] + text_a )
| 283 | 1 |
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or number < 0:
raise ValueError("Input must be a non-negative integer" )
lowerCamelCase : Dict = 0
while number:
# Clearing the lowest set bit jumps straight to the next 1 instead of
# looping through every bit position, so the loop runs once per set bit
# (e.g. 3 iterations for a 32-bit value with three 1s), not 32 times
number &= number - 1
count += 1
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
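# Worked example of the n &= n - 1 trick used above: each step clears the
# lowest set bit, so counting 0b10110 (22) takes exactly three iterations:
# 0b10110 -> 0b10100 -> 0b10000 -> 0b00000.
assert bin(22).count("1") == 3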
| 283 |
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
lowerCamelCase : List[Any] = 1
lowerCamelCase : Union[str, Any] = 1
while repunit:
lowerCamelCase : Union[str, Any] = (10 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
def lowercase_( SCREAMING_SNAKE_CASE_ = 1000000 ):
'''simple docstring'''
lowerCamelCase : List[str] = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(SCREAMING_SNAKE_CASE_ ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(f'''{solution() = }''')
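# Worked example for least_divisible_repunit: the k-digit repunit is
# R(k) = (10**k - 1) // 9, and R(6) = 111111 = 3 * 7 * 11 * 13 * 37 is the
# first repunit divisible by 7, so least_divisible_repunit(7) == 6.
assert (10**6 - 1) // 9 % 7 == 0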
| 283 | 1 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
_snake_case = 2_99_79_24_58
# Symbols
_snake_case , _snake_case , _snake_case , _snake_case = symbols('''ct x y z''')
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if velocity > c:
raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" )
elif velocity < 1:
# Speeds of interest are much greater than 1 m/s (on the order of c)
raise ValueError("Speed must be greater than or equal to 1!" )
return velocity / c
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return 1 / sqrt(1 - beta(SCREAMING_SNAKE_CASE_ ) ** 2 )
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return np.array(
[
[gamma(SCREAMING_SNAKE_CASE_ ), -gamma(SCREAMING_SNAKE_CASE_ ) * beta(SCREAMING_SNAKE_CASE_ ), 0, 0],
[-gamma(SCREAMING_SNAKE_CASE_ ) * beta(SCREAMING_SNAKE_CASE_ ), gamma(SCREAMING_SNAKE_CASE_ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
if event is None:
lowerCamelCase : Tuple = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(SCREAMING_SNAKE_CASE_ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
_snake_case = transform(29_97_92_45)
print('''Example of four vector: ''')
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
_snake_case = {ct: c, x: 1, y: 1, z: 1}
_snake_case = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
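# Worked check of beta/gamma above: at half light speed beta = 0.5 and
# gamma = 1 / sqrt(1 - 0.25) ~ 1.1547.
from math import isclose, sqrt
assert isclose(1 / sqrt(1 - 0.5**2), 1.1547, rel_tol=1e-4)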
| 283 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
_snake_case = logging.get_logger(__name__)
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
lowerCamelCase : Tuple = "backbone." if is_semantic else ""
lowerCamelCase : int = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", "beit.embeddings.cls_token"),
(f"""{prefix}patch_embed.proj.weight""", "beit.embeddings.patch_embeddings.projection.weight"),
(f"""{prefix}patch_embed.proj.bias""", "beit.embeddings.patch_embeddings.projection.bias"),
(f"""{prefix}pos_embed""", "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
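# Example of one (src, dest) pair the function above produces for layer 0:
#   ("blocks.0.norm1.weight", "beit.encoder.layer.0.layernorm_before.weight")
# (the prefix is "" here because is_semantic defaults to False).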
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False , SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
for i in range(config.num_hidden_layers ):
lowerCamelCase : Optional[Any] = "backbone." if is_semantic else ""
# queries, keys and values
lowerCamelCase : Optional[Any] = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
lowerCamelCase : Optional[Any] = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
lowerCamelCase : Tuple = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
lowerCamelCase : str = in_proj_weight[
: config.hidden_size, :
]
lowerCamelCase : Any = q_bias
lowerCamelCase : Any = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
lowerCamelCase : Optional[int] = in_proj_weight[
-config.hidden_size :, :
]
lowerCamelCase : int = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
lowerCamelCase : Any = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
lowerCamelCase : Any = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
lowerCamelCase : int = gamma_a
lowerCamelCase : Optional[Any] = gamma_a
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = dct.pop(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[Any] = val
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : Dict = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : Optional[Any] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
return im
@torch.no_grad()
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ):
'''simple docstring'''
lowerCamelCase : List[Any] = False if "rvlcdip" in checkpoint_url else True
lowerCamelCase : str = BeitConfig(use_absolute_position_embeddings=SCREAMING_SNAKE_CASE_ , use_mask_token=SCREAMING_SNAKE_CASE_ )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
lowerCamelCase : Union[str, Any] = 1024
lowerCamelCase : Any = 4096
lowerCamelCase : str = 24
lowerCamelCase : List[Any] = 16
# labels
if "rvlcdip" in checkpoint_url:
lowerCamelCase : Optional[Any] = 16
lowerCamelCase : Tuple = "huggingface/label-files"
lowerCamelCase : List[str] = "rvlcdip-id2label.json"
lowerCamelCase : str = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , repo_type="dataset" ) , "r" ) )
lowerCamelCase : Any = {int(SCREAMING_SNAKE_CASE_ ): v for k, v in idalabel.items()}
lowerCamelCase : Tuple = idalabel
lowerCamelCase : Dict = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
lowerCamelCase : int = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["model"]
lowerCamelCase : Tuple = create_rename_keys(SCREAMING_SNAKE_CASE_ , has_lm_head=SCREAMING_SNAKE_CASE_ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
read_in_q_k_v(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , has_lm_head=SCREAMING_SNAKE_CASE_ )
# load HuggingFace model
lowerCamelCase : List[Any] = BeitForMaskedImageModeling(SCREAMING_SNAKE_CASE_ ) if has_lm_head else BeitForImageClassification(SCREAMING_SNAKE_CASE_ )
model.eval()
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
# Check outputs on an image
lowerCamelCase : str = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Any = prepare_img()
lowerCamelCase : Optional[int] = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" )
lowerCamelCase : Optional[Any] = encoding["pixel_values"]
lowerCamelCase : Optional[Any] = model(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : Dict = outputs.logits
# verify logits
lowerCamelCase : List[Any] = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
assert logits.shape == torch.Size(SCREAMING_SNAKE_CASE_ ), "Shape of logits not as expected"
Path(SCREAMING_SNAKE_CASE_ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE_ )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
if has_lm_head:
lowerCamelCase : Optional[Any] = "dit-base" if "base" in checkpoint_url else "dit-large"
else:
lowerCamelCase : Dict = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
image_processor.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
model.push_to_hub(
repo_path_or_name=Path(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=SCREAMING_SNAKE_CASE_ , )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_url''',
default='''https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth''',
type=str,
help='''URL to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
_snake_case = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 283 | 1 |
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_snake_case = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : Optional[int] = ReformerTokenizer
__A : Dict = ReformerTokenizerFast
__A : Dict = True
__A : Optional[int] = False
__A : Tuple = True
def _snake_case ( self ):
"""simple docstring"""
super().setUp()
lowerCamelCase : Dict = ReformerTokenizer(__A , keep_accents=__A )
tokenizer.save_pretrained(self.tmpdirname )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Any = "<s>"
lowerCamelCase : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__A ) , __A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__A ) , __A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "j" )
self.assertEqual(len(__A ) , 1000 )
def _snake_case ( self ):
"""simple docstring"""
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def _snake_case ( self ):
"""simple docstring"""
if not self.test_rust_tokenizer:
return
lowerCamelCase : List[str] = self.get_tokenizer()
lowerCamelCase : Dict = self.get_rust_tokenizer()
lowerCamelCase : str = "I was born in 92000, and this is falsé."
lowerCamelCase : Optional[Any] = tokenizer.tokenize(__A )
lowerCamelCase : Tuple = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
lowerCamelCase : Union[str, Any] = tokenizer.encode(__A , add_special_tokens=__A )
lowerCamelCase : str = rust_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
lowerCamelCase : Tuple = self.get_rust_tokenizer()
lowerCamelCase : Union[str, Any] = tokenizer.encode(__A )
lowerCamelCase : str = rust_tokenizer.encode(__A )
self.assertListEqual(__A , __A )
def _snake_case ( self , __A=15 ):
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
lowerCamelCase : Optional[int] = self.rust_tokenizer_class.from_pretrained(__A , **__A )
# Simple input
lowerCamelCase : int = "This is a simple input"
lowerCamelCase : Optional[int] = ["This is a simple input 1", "This is a simple input 2"]
lowerCamelCase : Any = ("This is a simple input", "This is a pair")
lowerCamelCase : List[Any] = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__A , tokenizer_r.encode , __A , max_length=__A , padding="max_length" )
# Simple input
self.assertRaises(__A , tokenizer_r.encode_plus , __A , max_length=__A , padding="max_length" )
# Simple input
self.assertRaises(
__A , tokenizer_r.batch_encode_plus , __A , max_length=__A , padding="max_length" , )
# Pair input
self.assertRaises(__A , tokenizer_r.encode , __A , max_length=__A , padding="max_length" )
# Pair input
self.assertRaises(__A , tokenizer_r.encode_plus , __A , max_length=__A , padding="max_length" )
# Pair input
self.assertRaises(
__A , tokenizer_r.batch_encode_plus , __A , max_length=__A , padding="max_length" , )
def _snake_case ( self ):
"""simple docstring"""
pass
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = ReformerTokenizer(__A , keep_accents=__A )
lowerCamelCase : str = tokenizer.tokenize("This is a test" )
self.assertListEqual(__A , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__A ) , [285, 46, 10, 170, 382] , )
lowerCamelCase : Any = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowerCamelCase : str = tokenizer.convert_tokens_to_ids(__A )
self.assertListEqual(
__A , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowerCamelCase : Union[str, Any] = tokenizer.convert_ids_to_tokens(__A )
self.assertListEqual(
__A , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def _snake_case ( self ):
"""simple docstring"""
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment" )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = "Hello World!"
lowerCamelCase : Dict = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(__A , self.big_tokenizer.encode(__A ) )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
lowerCamelCase : Any = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(__A , self.big_tokenizer.encode(__A ) )
@require_torch
@slow
def _snake_case ( self ):
"""simple docstring"""
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
lowerCamelCase : Dict = list(self.big_tokenizer.get_vocab().keys() )[:10]
lowerCamelCase : Tuple = " ".join(__A )
lowerCamelCase : List[str] = self.big_tokenizer.encode_plus(__A , return_tensors="pt" )
lowerCamelCase : Optional[int] = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt" )
lowerCamelCase : Any = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
lowerCamelCase : Any = encoded_sequence["input_ids"].shape
lowerCamelCase : Dict = ReformerModel(__A )
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__A )
model(**__A )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
lowerCamelCase : Optional[Any] = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=__A , sequences=__A , )
| 283 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["image_processor", "tokenizer"]
__A : Dict = "BridgeTowerImageProcessor"
__A : Optional[int] = ("RobertaTokenizer", "RobertaTokenizerFast")
def __init__( self , __A , __A ):
"""simple docstring"""
super().__init__(__A , __A )
def __call__( self , __A , __A = None , __A = True , __A = False , __A = None , __A = None , __A = 0 , __A = None , __A = None , __A = None , __A = False , __A = False , __A = False , __A = False , __A = True , __A = None , **__A , ):
"""simple docstring"""
lowerCamelCase : str = self.tokenizer(
text=__A , add_special_tokens=__A , padding=__A , truncation=__A , max_length=__A , stride=__A , pad_to_multiple_of=__A , return_token_type_ids=__A , return_attention_mask=__A , return_overflowing_tokens=__A , return_special_tokens_mask=__A , return_offsets_mapping=__A , return_length=__A , verbose=__A , return_tensors=__A , **__A , )
# add pixel_values + pixel_mask
lowerCamelCase : int = self.image_processor(
__A , return_tensors=__A , do_normalize=__A , do_center_crop=__A , **__A )
encoding.update(__A )
return encoding
def _snake_case ( self , *__A , **__A ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__A , **__A )
def _snake_case ( self , *__A , **__A ):
"""simple docstring"""
return self.tokenizer.decode(*__A , **__A )
@property
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.tokenizer.model_input_names
lowerCamelCase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
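# A minimal usage sketch for the processor above (the checkpoint id and the
# dummy image are assumptions, not taken from this file):
#     from PIL import Image
#     from transformers import BridgeTowerProcessor
#     processor = BridgeTowerProcessor.from_pretrained("BridgeTower/bridgetower-base")
#     inputs = processor(images=Image.new("RGB", (288, 288)), text="a photo", return_tensors="pt")
#     # -> input_ids / attention_mask from the tokenizer plus pixel_values / pixel_mask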
| 283 | 1 |
import argparse
import gc
import json
import os
import shutil
import warnings
import torch
from transformers import LlamaConfig, LlamaForCausalLM, LlamaTokenizer
try:
from transformers import LlamaTokenizerFast
except ImportError as e:
warnings.warn(e)
warnings.warn(
'''The converted tokenizer will be the `slow` tokenizer. To use the fast, update your `tokenizers` library and re-run the tokenizer conversion'''
)
_snake_case = None
_snake_case = {
'''7B''': 1_10_08,
'''13B''': 1_38_24,
'''30B''': 1_79_20,
'''65B''': 2_20_16,
'''70B''': 2_86_72,
}
_snake_case = {
'''7B''': 1,
'''7Bf''': 1,
'''13B''': 2,
'''13Bf''': 2,
'''30B''': 4,
'''65B''': 8,
'''70B''': 8,
'''70Bf''': 8,
}
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1 , SCREAMING_SNAKE_CASE_=256 ):
'''simple docstring'''
return multiple_of * ((int(ffn_dim_multiplier * int(8 * n / 3 ) ) + multiple_of - 1) // multiple_of)
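# Worked example of the rounding above with LLaMA-2-70B-style values (ours):
# n=8192, ffn_dim_multiplier=1.3, multiple_of=4096 ->
# int(1.3 * int(8 * 8192 / 3)) = 28398, rounded up to the next multiple of
# 4096 = 28672, matching the 28672 listed for '70B' in the first dict above.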
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , "r" ) as f:
return json.load(SCREAMING_SNAKE_CASE_ )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , "w" ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ):
'''simple docstring'''
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[Any] = os.path.join(SCREAMING_SNAKE_CASE_ , "tmp" )
os.makedirs(SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
lowerCamelCase : List[str] = read_json(os.path.join(SCREAMING_SNAKE_CASE_ , "params.json" ) )
lowerCamelCase : Tuple = NUM_SHARDS[model_size]
lowerCamelCase : List[str] = params["n_layers"]
lowerCamelCase : int = params["n_heads"]
lowerCamelCase : List[str] = n_heads // num_shards
lowerCamelCase : Tuple = params["dim"]
lowerCamelCase : Optional[int] = dim // n_heads
lowerCamelCase : Dict = 10000.0
lowerCamelCase : str = 1.0 / (base ** (torch.arange(0 , SCREAMING_SNAKE_CASE_ , 2 ).float() / dims_per_head))
if "n_kv_heads" in params:
lowerCamelCase : Union[str, Any] = params["n_kv_heads"] # for GQA / MQA
lowerCamelCase : Tuple = n_heads_per_shard // num_key_value_heads
lowerCamelCase : str = dim // num_key_value_heads
else: # compatibility with other checkpoints
lowerCamelCase : Union[str, Any] = n_heads
lowerCamelCase : int = n_heads_per_shard
lowerCamelCase : Union[str, Any] = dim
# permute for sliced rotary
def permute(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=n_heads , SCREAMING_SNAKE_CASE_=dim , SCREAMING_SNAKE_CASE_=dim ):
return w.view(SCREAMING_SNAKE_CASE_ , dima // n_heads // 2 , 2 , SCREAMING_SNAKE_CASE_ ).transpose(1 , 2 ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(f"""Fetching all parameters from the checkpoint at {input_base_path}.""" )
# Load weights
if model_size == "7B":
# Not sharded
# (The sharded implementation would also work, but this is simpler.)
lowerCamelCase : List[str] = torch.load(os.path.join(SCREAMING_SNAKE_CASE_ , "consolidated.00.pth" ) , map_location="cpu" )
else:
# Sharded
lowerCamelCase : Dict = [
torch.load(os.path.join(SCREAMING_SNAKE_CASE_ , f"""consolidated.{i:02d}.pth""" ) , map_location="cpu" )
for i in range(SCREAMING_SNAKE_CASE_ )
]
lowerCamelCase : Any = 0
lowerCamelCase : Any = {"weight_map": {}}
for layer_i in range(SCREAMING_SNAKE_CASE_ ):
lowerCamelCase : Optional[int] = f"""pytorch_model-{layer_i + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
lowerCamelCase : Optional[int] = {
f"""model.layers.{layer_i}.self_attn.q_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wq.weight"""] ),
f"""model.layers.{layer_i}.self_attn.k_proj.weight""": permute(
loaded[f"""layers.{layer_i}.attention.wk.weight"""] ),
f"""model.layers.{layer_i}.self_attn.v_proj.weight""": loaded[f"""layers.{layer_i}.attention.wv.weight"""],
f"""model.layers.{layer_i}.self_attn.o_proj.weight""": loaded[f"""layers.{layer_i}.attention.wo.weight"""],
f"""model.layers.{layer_i}.mlp.gate_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w1.weight"""],
f"""model.layers.{layer_i}.mlp.down_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w2.weight"""],
f"""model.layers.{layer_i}.mlp.up_proj.weight""": loaded[f"""layers.{layer_i}.feed_forward.w3.weight"""],
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[f"""layers.{layer_i}.attention_norm.weight"""],
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[f"""layers.{layer_i}.ffn_norm.weight"""],
}
else:
# Sharded
# Note that attention.w{q,k,v,o}, feed_forward.w[1,2,3], attention_norm.weight and ffn_norm.weight share
# the same storage object, saving attention_norm and ffn_norm will save other weights too, which is
# redundant as other weights will be stitched from multiple shards. To avoid that, they are cloned.
lowerCamelCase : Dict = {
f"""model.layers.{layer_i}.input_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.attention_norm.weight"""
].clone(),
f"""model.layers.{layer_i}.post_attention_layernorm.weight""": loaded[0][
f"""layers.{layer_i}.ffn_norm.weight"""
].clone(),
}
lowerCamelCase : Optional[Any] = permute(
torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wq.weight"""].view(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase : Union[str, Any] = permute(
torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wk.weight"""].view(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , )
lowerCamelCase : Optional[int] = torch.cat(
[
loaded[i][f"""layers.{layer_i}.attention.wv.weight"""].view(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ )
] , dim=0 , ).reshape(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCamelCase : int = torch.cat(
[loaded[i][f"""layers.{layer_i}.attention.wo.weight"""] for i in range(SCREAMING_SNAKE_CASE_ )] , dim=1 )
lowerCamelCase : Tuple = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w1.weight"""] for i in range(SCREAMING_SNAKE_CASE_ )] , dim=0 )
lowerCamelCase : List[str] = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w2.weight"""] for i in range(SCREAMING_SNAKE_CASE_ )] , dim=1 )
lowerCamelCase : Optional[Any] = torch.cat(
[loaded[i][f"""layers.{layer_i}.feed_forward.w3.weight"""] for i in range(SCREAMING_SNAKE_CASE_ )] , dim=0 )
lowerCamelCase : List[Any] = inv_freq
for k, v in state_dict.items():
lowerCamelCase : Dict = filename
param_count += v.numel()
torch.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
lowerCamelCase : List[str] = f"""pytorch_model-{n_layers + 1}-of-{n_layers + 1}.bin"""
if model_size == "7B":
# Unsharded
lowerCamelCase : Optional[int] = {
"model.embed_tokens.weight": loaded["tok_embeddings.weight"],
"model.norm.weight": loaded["norm.weight"],
"lm_head.weight": loaded["output.weight"],
}
else:
lowerCamelCase : Any = {
"model.norm.weight": loaded[0]["norm.weight"],
"model.embed_tokens.weight": torch.cat(
[loaded[i]["tok_embeddings.weight"] for i in range(SCREAMING_SNAKE_CASE_ )] , dim=1 ),
"lm_head.weight": torch.cat([loaded[i]["output.weight"] for i in range(SCREAMING_SNAKE_CASE_ )] , dim=0 ),
}
for k, v in state_dict.items():
lowerCamelCase : List[Any] = filename
param_count += v.numel()
torch.save(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
# Write configs
lowerCamelCase : Optional[int] = {"total_size": param_count * 2}
write_json(SCREAMING_SNAKE_CASE_ , os.path.join(SCREAMING_SNAKE_CASE_ , "pytorch_model.bin.index.json" ) )
lowerCamelCase : Tuple = params["ffn_dim_multiplier"] if "ffn_dim_multiplier" in params else 1
lowerCamelCase : Dict = params["multiple_of"] if "multiple_of" in params else 256
lowerCamelCase : Tuple = LlamaConfig(
hidden_size=SCREAMING_SNAKE_CASE_ , intermediate_size=compute_intermediate_size(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , num_attention_heads=params["n_heads"] , num_hidden_layers=params["n_layers"] , rms_norm_eps=params["norm_eps"] , num_key_value_heads=SCREAMING_SNAKE_CASE_ , )
config.save_pretrained(SCREAMING_SNAKE_CASE_ )
# Make space so we can load the model properly now.
del state_dict
del loaded
gc.collect()
print("Loading the checkpoint in a Llama model." )
lowerCamelCase : List[Any] = LlamaForCausalLM.from_pretrained(SCREAMING_SNAKE_CASE_ , torch_dtype=torch.floataa , low_cpu_mem_usage=SCREAMING_SNAKE_CASE_ )
# Avoid saving this as part of the config.
del model.config._name_or_path
print("Saving in the Transformers format." )
model.save_pretrained(SCREAMING_SNAKE_CASE_ , safe_serialization=SCREAMING_SNAKE_CASE_ )
shutil.rmtree(SCREAMING_SNAKE_CASE_ )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Union[str, Any] = LlamaTokenizer if LlamaTokenizerFast is None else LlamaTokenizerFast
print(f"""Saving a {tokenizer_class.__name__} to {tokenizer_path}.""" )
lowerCamelCase : int = tokenizer_class(SCREAMING_SNAKE_CASE_ )
tokenizer.save_pretrained(SCREAMING_SNAKE_CASE_ )
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"--input_dir" , help="Location of LLaMA weights, which contains tokenizer.model and model folders" , )
parser.add_argument(
"--model_size" , choices=["7B", "7Bf", "13B", "13Bf", "30B", "65B", "70B", "70Bf", "tokenizer_only"] , )
parser.add_argument(
"--output_dir" , help="Location to write HF model and tokenizer" , )
parser.add_argument("--safe_serialization" , type=SCREAMING_SNAKE_CASE_ , help="Whether or not to save using `safetensors`." )
lowerCamelCase : Union[str, Any] = parser.parse_args()
if args.model_size != "tokenizer_only":
write_model(
model_path=args.output_dir , input_base_path=os.path.join(args.input_dir , args.model_size ) , model_size=args.model_size , safe_serialization=args.safe_serialization , )
lowerCamelCase : int = os.path.join(args.input_dir , "tokenizer.model" )
write_tokenizer(args.output_dir , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 283 |
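The converter above restores tensor-parallel LLaMA shards into single tensors before writing the sharded .bin files. A minimal sketch of the stitching rule it applies, under the usual Megatron-style layout (the shapes below are illustrative): column-parallel projections (wq, wk, wv, w1, w3) concatenate along dim 0, row-parallel ones (wo, w2) along dim 1.

import torch

# Two hypothetical tensor-parallel shards of a column-parallel projection
# (split along the output dimension), e.g. attention wq/wk/wv.
col_shards = [torch.randn(2048, 4096) for _ in range(2)]
q_proj = torch.cat(col_shards, dim=0)   # -> (4096, 4096)

# Row-parallel projections (split along the input dimension), e.g. wo / w2.
row_shards = [torch.randn(4096, 2048) for _ in range(2)]
o_proj = torch.cat(row_shards, dim=1)   # -> (4096, 4096)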
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
for i in range(len(SCREAMING_SNAKE_CASE_ ) - 1 , 0 , -1 ):
lowerCamelCase : Tuple = False
for j in range(SCREAMING_SNAKE_CASE_ , 0 , -1 ):
if unsorted[j] < unsorted[j - 1]:
lowerCamelCase , lowerCamelCase : int = unsorted[j - 1], unsorted[j]
lowerCamelCase : Optional[int] = True
for j in range(SCREAMING_SNAKE_CASE_ ):
if unsorted[j] > unsorted[j + 1]:
lowerCamelCase , lowerCamelCase : Union[str, Any] = unsorted[j + 1], unsorted[j]
lowerCamelCase : Optional[Any] = True
if not swapped:
break
return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
_snake_case = input('''Enter numbers separated by a comma:\n''').strip()
_snake_case = [int(item) for item in user_input.split(''',''')]
print(f'''{cocktail_shaker_sort(unsorted) = }''')
| 283 | 1 |
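For readability, here is an un-anonymized sketch of the cocktail shaker sort above; the anonymized assignments drop the left-hand swap targets, which this version restores.

def cocktail_shaker_sort(unsorted: list) -> list:
    """Sort in place by alternating backward and forward bubble passes."""
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        for j in range(i, 0, -1):            # backward pass: sink small items left
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i):                   # forward pass: bubble large items right
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:                      # already sorted: stop early
            break
    return unsorted

assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]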
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = XLMRobertaModel.from_pretrained("xlm-roberta-base" )
lowerCamelCase : str = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
lowerCamelCase : Optional[int] = torch.Size((1, 12, 768) ) # batch_size, sequence_length, embedding_vector_dim
lowerCamelCase : Tuple = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCamelCase : Any = model(__A )["last_hidden_state"].detach()
self.assertEqual(output.shape , __A )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __A , atol=1e-3 ) )
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = XLMRobertaModel.from_pretrained("xlm-roberta-large" )
lowerCamelCase : List[str] = torch.tensor([[0, 581, 1_0269, 83, 9_9942, 136, 6_0742, 23, 70, 8_0583, 1_8276, 2]] )
# The dog is cute and lives in the garden house
lowerCamelCase : int = torch.Size((1, 12, 1024) ) # batch_size, sequence_length, embedding_vector_dim
lowerCamelCase : Union[str, Any] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
lowerCamelCase : List[str] = model(__A )["last_hidden_state"].detach()
self.assertEqual(output.shape , __A )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , __A , atol=1e-3 ) )
| 283 |
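Both tests above follow the same integration-test pattern: run the model under torch.no_grad() and compare a small slice of the output against hard-coded reference values. A minimal sketch of that check:

import torch

def matches_reference(output: torch.Tensor, expected_slice: torch.Tensor, atol: float = 1e-3) -> bool:
    # Compare only a slice (here: the last embedding dimension) so the
    # hard-coded reference values stay small and readable.
    return torch.allclose(output[:, :, -1], expected_slice, atol=atol)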
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : Tuple = StableUnCLIPPipeline
__A : Optional[int] = TEXT_TO_IMAGE_PARAMS
__A : str = TEXT_TO_IMAGE_BATCH_PARAMS
__A : int = TEXT_TO_IMAGE_IMAGE_PARAMS
__A : Tuple = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__A : Union[str, Any] = False
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[str] = 32
lowerCamelCase : Dict = embedder_hidden_size
# prior components
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__A , projection_dim=__A , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase : List[Any] = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=__A , num_layers=1 , )
torch.manual_seed(0 )
lowerCamelCase : Dict = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=1000 , clip_sample=__A , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
lowerCamelCase : Optional[int] = StableUnCLIPImageNormalizer(embedding_dim=__A )
lowerCamelCase : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
lowerCamelCase : List[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
lowerCamelCase : str = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=__A , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
lowerCamelCase : Any = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=__A , layers_per_block=1 , upcast_attention=__A , use_linear_projection=__A , )
torch.manual_seed(0 )
lowerCamelCase : int = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=__A , steps_offset=1 , )
torch.manual_seed(0 )
lowerCamelCase : Optional[Any] = AutoencoderKL()
lowerCamelCase : Optional[int] = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def _snake_case ( self , __A , __A=0 ):
"""simple docstring"""
if str(__A ).startswith("mps" ):
lowerCamelCase : Optional[int] = torch.manual_seed(__A )
else:
lowerCamelCase : Optional[Any] = torch.Generator(device=__A ).manual_seed(__A )
lowerCamelCase : Tuple = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=__A )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
lowerCamelCase : str = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase : List[Any] = torch.Generator(device="cpu" ).manual_seed(0 )
lowerCamelCase : Dict = pipe("anime turtle" , generator=__A , output_type="np" )
lowerCamelCase : Dict = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(__A , __A )
def _snake_case ( self ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowerCamelCase : int = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
lowerCamelCase : Union[str, Any] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
lowerCamelCase : Any = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
lowerCamelCase : List[str] = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 283 | 1 |
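The memory-saving toggles in the slow tests above are the interesting part. A usage sketch of the same pattern (model id taken from the test, fp16 assumed, step count illustrative):

import torch
from diffusers import StableUnCLIPPipeline

pipe = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l", torch_dtype=torch.float16)
pipe.enable_attention_slicing()        # compute attention in slices to cap peak activation memory
pipe.enable_sequential_cpu_offload()   # keep weights on CPU, stream submodules to the GPU on demand
image = pipe("anime turtle", num_inference_steps=20).images[0]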
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
def __init__( self , __A , __A=13 , __A=7 , __A=True , __A=True , __A=True , __A=True , __A=99 , __A=32 , __A=5 , __A=4 , __A=37 , __A="gelu" , __A=0.1 , __A=0.1 , __A=512 , __A=16 , __A=2 , __A=0.02 , __A=False , __A=True , __A="None" , __A=3 , __A=4 , __A=None , ):
"""simple docstring"""
lowerCamelCase : int = parent
lowerCamelCase : List[Any] = batch_size
lowerCamelCase : List[Any] = seq_length
lowerCamelCase : List[str] = is_training
lowerCamelCase : Optional[int] = use_input_mask
lowerCamelCase : Tuple = use_token_type_ids
lowerCamelCase : Any = use_labels
lowerCamelCase : Tuple = vocab_size
lowerCamelCase : Optional[int] = hidden_size
lowerCamelCase : Dict = num_hidden_layers
lowerCamelCase : str = num_attention_heads
lowerCamelCase : Tuple = intermediate_size
lowerCamelCase : List[Any] = hidden_act
lowerCamelCase : str = hidden_dropout_prob
lowerCamelCase : Optional[Any] = attention_probs_dropout_prob
lowerCamelCase : List[Any] = max_position_embeddings
lowerCamelCase : Any = type_vocab_size
lowerCamelCase : List[Any] = type_sequence_label_size
lowerCamelCase : List[str] = initializer_range
lowerCamelCase : Optional[int] = num_labels
lowerCamelCase : Dict = num_choices
lowerCamelCase : int = relative_attention
lowerCamelCase : Union[str, Any] = position_biased_input
lowerCamelCase : Optional[Any] = pos_att_type
lowerCamelCase : List[str] = scope
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : List[str] = None
if self.use_input_mask:
lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowerCamelCase : Any = None
if self.use_token_type_ids:
lowerCamelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase : Dict = None
lowerCamelCase : int = None
lowerCamelCase : List[Any] = None
if self.use_labels:
lowerCamelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self ):
"""simple docstring"""
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.get_config()
lowerCamelCase : Tuple = 300
return config
def _snake_case ( self , __A ):
"""simple docstring"""
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = DebertaModel(config=__A )
model.to(__A )
model.eval()
lowerCamelCase : Any = model(__A , attention_mask=__A , token_type_ids=__A )[0]
lowerCamelCase : int = model(__A , token_type_ids=__A )[0]
lowerCamelCase : Optional[Any] = model(__A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A ):
"""simple docstring"""
lowerCamelCase : Optional[int] = DebertaForMaskedLM(config=__A )
model.to(__A )
model.eval()
lowerCamelCase : Dict = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A ):
"""simple docstring"""
lowerCamelCase : Dict = self.num_labels
lowerCamelCase : str = DebertaForSequenceClassification(__A )
model.to(__A )
model.eval()
lowerCamelCase : Optional[int] = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__A )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.num_labels
lowerCamelCase : List[str] = DebertaForTokenClassification(config=__A )
model.to(__A )
model.eval()
lowerCamelCase : Any = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A ):
"""simple docstring"""
lowerCamelCase : Tuple = DebertaForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
lowerCamelCase : Tuple = model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : int = self.prepare_config_and_inputs()
(
(
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) , (
lowerCamelCase
) ,
) : Tuple = config_and_inputs
lowerCamelCase : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : Tuple = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
__A : Dict = (
{
"feature-extraction": DebertaModel,
"fill-mask": DebertaForMaskedLM,
"question-answering": DebertaForQuestionAnswering,
"text-classification": DebertaForSequenceClassification,
"token-classification": DebertaForTokenClassification,
"zero-shot": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
__A : Union[str, Any] = True
__A : int = False
__A : str = False
__A : Optional[Any] = False
__A : Optional[Any] = False
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = DebertaModelTester(self )
lowerCamelCase : Dict = ConfigTester(self , config_class=__A , hidden_size=37 )
def _snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__A )
@slow
def _snake_case ( self ):
"""simple docstring"""
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : Optional[Any] = DebertaModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason="Model not available yet" )
def _snake_case ( self ):
"""simple docstring"""
pass
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = DebertaModel.from_pretrained("microsoft/deberta-base" )
lowerCamelCase : int = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
lowerCamelCase : int = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCamelCase : str = model(__A , attention_mask=__A )[0]
# compare the actual values for a slice.
lowerCamelCase : List[Any] = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1e-4 ) , F"""{output[:, 1:4, 1:4]}""" )
| 283 |
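The tester class above follows the standard Transformers recipe: random token ids in, shape assertions out. A self-contained sketch with a deliberately tiny config (values are illustrative, not the model defaults):

import torch
from transformers import DebertaConfig, DebertaModel

config = DebertaConfig(vocab_size=99, hidden_size=32, num_hidden_layers=2,
                       num_attention_heads=4, intermediate_size=37)
model = DebertaModel(config).eval()
input_ids = torch.randint(0, config.vocab_size, (2, 7))   # (batch, seq_len)
with torch.no_grad():
    out = model(input_ids).last_hidden_state
assert out.shape == (2, 7, config.hidden_size)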
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_snake_case = {
'''configuration_squeezebert''': [
'''SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SqueezeBertConfig''',
'''SqueezeBertOnnxConfig''',
],
'''tokenization_squeezebert''': ['''SqueezeBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''SqueezeBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = [
'''SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SqueezeBertForMaskedLM''',
'''SqueezeBertForMultipleChoice''',
'''SqueezeBertForQuestionAnswering''',
'''SqueezeBertForSequenceClassification''',
'''SqueezeBertForTokenClassification''',
'''SqueezeBertModel''',
'''SqueezeBertModule''',
'''SqueezeBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 283 | 1 |
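The module above defers all heavy imports behind _LazyModule. A minimal sketch of the underlying idea, resolving attributes to submodules only on first access (module and attribute names are hypothetical):

import importlib

class LazyModule:
    """Resolve submodule attributes on first access instead of at import time."""
    def __init__(self, name: str, import_structure: dict):
        self._name = name
        # Invert {submodule: [attrs]} into {attr: submodule} for lookup.
        self._attr_to_module = {attr: mod
                                for mod, attrs in import_structure.items()
                                for attr in attrs}

    def __getattr__(self, attr: str):
        module = importlib.import_module(f"{self._name}.{self._attr_to_module[attr]}")
        return getattr(module, attr)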
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class UpperCAmelCase_ ( yaml.SafeLoader ):
'''simple docstring'''
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : int = [self.constructed_objects[key_node] for key_node, _ in node.value]
lowerCamelCase : Optional[int] = [tuple(__A ) if isinstance(__A , __A ) else key for key in keys]
lowerCamelCase : Optional[int] = Counter(__A )
lowerCamelCase : Union[str, Any] = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F"""Got duplicate yaml keys: {duplicate_keys}""" )
def _snake_case ( self , __A , __A=False ):
"""simple docstring"""
lowerCamelCase : Any = super().construct_mapping(__A , deep=__A )
self._check_no_duplicates_on_constructed_node(__A )
return mapping
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : List[str] = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
lowerCamelCase : str = full_content[1:].index("---" ) + 1
lowerCamelCase : Optional[int] = "\n".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(SCREAMING_SNAKE_CASE_ )
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = {"train_eval_index"} # train-eval-index in the YAML metadata
@classmethod
def _snake_case ( cls , __A ):
"""simple docstring"""
with open(__A , encoding="utf-8" ) as readme_file:
lowerCamelCase , lowerCamelCase : Dict = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(__A )
else:
return cls()
def _snake_case ( self , __A ):
"""simple docstring"""
if path.exists():
with open(__A , encoding="utf-8" ) as readme_file:
lowerCamelCase : int = readme_file.read()
else:
lowerCamelCase : Tuple = None
lowerCamelCase : Any = self._to_readme(__A )
with open(__A , "w" , encoding="utf-8" ) as readme_file:
readme_file.write(__A )
def _snake_case ( self , __A = None ):
"""simple docstring"""
if readme_content is not None:
lowerCamelCase , lowerCamelCase : Dict = _split_yaml_from_readme(__A )
lowerCamelCase : List[Any] = "---\n" + self.to_yaml_string() + "---\n" + content
else:
lowerCamelCase : List[Any] = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def _snake_case ( cls , __A ):
"""simple docstring"""
lowerCamelCase : str = yaml.load(__A , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
lowerCamelCase : Optional[Any] = {
(key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**__A )
def _snake_case ( self ):
"""simple docstring"""
return yaml.safe_dump(
{
(key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=__A , allow_unicode=__A , encoding="utf-8" , ).decode("utf-8" )
_snake_case = {
'''image-classification''': [],
'''translation''': [],
'''image-segmentation''': [],
'''fill-mask''': [],
'''automatic-speech-recognition''': [],
'''token-classification''': [],
'''sentence-similarity''': [],
'''audio-classification''': [],
'''question-answering''': [],
'''summarization''': [],
'''zero-shot-classification''': [],
'''table-to-text''': [],
'''feature-extraction''': [],
'''other''': [],
'''multiple-choice''': [],
'''text-classification''': [],
'''text-to-image''': [],
'''text2text-generation''': [],
'''zero-shot-image-classification''': [],
'''tabular-classification''': [],
'''tabular-regression''': [],
'''image-to-image''': [],
'''tabular-to-text''': [],
'''unconditional-image-generation''': [],
'''text-retrieval''': [],
'''text-to-speech''': [],
'''object-detection''': [],
'''audio-to-audio''': [],
'''text-generation''': [],
'''conversational''': [],
'''table-question-answering''': [],
'''visual-question-answering''': [],
'''image-to-text''': [],
'''reinforcement-learning''': [],
'''voice-activity-detection''': [],
'''time-series-forecasting''': [],
'''document-question-answering''': [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
_snake_case = ArgumentParser(usage='''Validate the yaml metadata block of a README.md file.''')
ap.add_argument('''readme_filepath''')
_snake_case = ap.parse_args()
_snake_case = Path(args.readme_filepath)
_snake_case = DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 283 |
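_split_yaml_from_readme is the core of the file above: it peels a "---"-fenced YAML block off the top of a README. A standalone sketch of the split (the README contents are made up):

readme = """---
license: mit
task_categories:
- text-classification
---
# My dataset
"""
lines = readme.splitlines()
sep_idx = lines[1:].index("---") + 1                 # closing fence, as an index into `lines`
yaml_block = "\n".join(lines[1:sep_idx])             # "license: mit\ntask_categories:\n- text-classification"
body = "\n".join(lines[sep_idx + 1:])                # "# My dataset"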
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : str = "decision_transformer"
__A : Union[str, Any] = ["past_key_values"]
__A : Optional[int] = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self , __A=17 , __A=4 , __A=128 , __A=4096 , __A=True , __A=1 , __A=1024 , __A=3 , __A=1 , __A=None , __A="relu" , __A=0.1 , __A=0.1 , __A=0.1 , __A=1e-5 , __A=0.02 , __A=True , __A=True , __A=5_0256 , __A=5_0256 , __A=False , __A=False , **__A , ):
"""simple docstring"""
lowerCamelCase : List[str] = state_dim
lowerCamelCase : Tuple = act_dim
lowerCamelCase : List[str] = hidden_size
lowerCamelCase : Optional[Any] = max_ep_len
lowerCamelCase : Union[str, Any] = action_tanh
lowerCamelCase : int = vocab_size
lowerCamelCase : List[Any] = n_positions
lowerCamelCase : Dict = n_layer
lowerCamelCase : int = n_head
lowerCamelCase : List[Any] = n_inner
lowerCamelCase : Any = activation_function
lowerCamelCase : Optional[int] = resid_pdrop
lowerCamelCase : str = embd_pdrop
lowerCamelCase : Tuple = attn_pdrop
lowerCamelCase : List[Any] = layer_norm_epsilon
lowerCamelCase : Dict = initializer_range
lowerCamelCase : Optional[int] = scale_attn_weights
lowerCamelCase : List[Any] = use_cache
lowerCamelCase : Tuple = scale_attn_by_inverse_layer_idx
lowerCamelCase : Optional[int] = reorder_and_upcast_attn
lowerCamelCase : Dict = bos_token_id
lowerCamelCase : Any = eos_token_id
super().__init__(bos_token_id=__A , eos_token_id=__A , **__A )
| 283 | 1 |
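Configs like this one are plain serializable attribute containers consumed by the model class. A typical instantiation, assuming the public Transformers names DecisionTransformerConfig and DecisionTransformerModel:

from transformers import DecisionTransformerConfig, DecisionTransformerModel

config = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
model = DecisionTransformerModel(config)  # GPT-2-style trunk over (return, state, action) tokens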
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )
lowerCamelCase : Any = chkpt["model"]
# We have the base model one level deeper than the original XLM repository
lowerCamelCase : Dict = {}
for k, v in state_dict.items():
if "pred_layer" in k:
lowerCamelCase : Optional[int] = v
else:
lowerCamelCase : int = v
lowerCamelCase : Union[str, Any] = chkpt["params"]
lowerCamelCase : int = {n: v for n, v in config.items() if not isinstance(SCREAMING_SNAKE_CASE_ , (torch.FloatTensor, numpy.ndarray) )}
lowerCamelCase : int = chkpt["dico_word2id"]
lowerCamelCase : Any = {s + "</w>" if s.find("@@" ) == -1 and i > 13 else s.replace("@@" , "" ): i for s, i in vocab.items()}
# Save pytorch-model
lowerCamelCase : List[str] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
lowerCamelCase : Optional[int] = pytorch_dump_folder_path + "/" + CONFIG_NAME
lowerCamelCase : Optional[int] = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ , indent=2 ) + "\n" )
print(f"""Save vocab file to {pytorch_config_dump_path}""" )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE_ , indent=2 ) + "\n" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xlm_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_snake_case = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 283 |
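The least obvious line in the converter is the vocabulary rewrite: BPE continuation markers (@@) are stripped, and word-final tokens past the first 14 special symbols get a </w> suffix. A standalone sketch of that rule (tokens are made up):

vocab = {"<s>": 0, "hel@@": 20, "lo": 21}
hf_vocab = {
    (tok + "</w>") if "@@" not in tok and idx > 13 else tok.replace("@@", ""): idx
    for tok, idx in vocab.items()
}
# -> {"<s>": 0, "hel": 20, "lo</w>": 21}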
def lowercase_( SCREAMING_SNAKE_CASE_ = 4000000 ):
'''simple docstring'''
lowerCamelCase : Any = [0, 1]
lowerCamelCase : Union[str, Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase : Union[str, Any] = 0
for j in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 283 | 1 |
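A readable equivalent of the Project Euler #2 solution above (sum of the even Fibonacci numbers not exceeding n), without materializing the list:

def even_fib_sum(n: int = 4_000_000) -> int:
    a, b, total = 0, 1, 0
    while b <= n:
        if b % 2 == 0:
            total += b
        a, b = b, a + b   # advance the Fibonacci pair
    return total

assert even_fib_sum(10) == 10   # 2 + 8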
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ""
__A : str = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
__A : str = None # compression type in fsspec. ex: "gzip"
__A : str = None # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
def __init__( self , __A = "" , __A = None , __A = None , **__A ):
"""simple docstring"""
super().__init__(self , **__A )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
lowerCamelCase : Optional[int] = fsspec.open(
__A , mode="rb" , protocol=__A , compression=self.compression , client_kwargs={
"requote_redirect_url": False, # see https://github.com/huggingface/datasets/pull/5459
"trust_env": True, # Enable reading proxy env variables.
**(target_options or {}).pop("client_kwargs" , {} ), # To avoid issues if it was already passed.
} , **(target_options or {}) , )
lowerCamelCase : Dict = os.path.basename(self.file.path.split("::" )[0] )
lowerCamelCase : Dict = (
self.compressed_name[: self.compressed_name.rindex("." )]
if "." in self.compressed_name
else self.compressed_name
)
lowerCamelCase : int = None
@classmethod
def _snake_case ( cls , __A ):
"""simple docstring"""
return super()._strip_protocol(__A ).lstrip("/" )
def _snake_case ( self ):
"""simple docstring"""
if self.dir_cache is None:
lowerCamelCase : str = {**self.file.fs.info(self.file.path ), "name": self.uncompressed_name}
lowerCamelCase : Dict = {f["name"]: f}
def _snake_case ( self , __A ):
"""simple docstring"""
return self.file.open().read()
def _snake_case ( self , __A , __A = "rb" , __A=None , __A=True , __A=None , **__A , ):
"""simple docstring"""
lowerCamelCase : Dict = self._strip_protocol(__A )
if mode != "rb":
raise ValueError(F"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""" )
return self.file.open()
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Any = "bz2"
__A : List[Any] = "bz2"
__A : str = ".bz2"
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = "gzip"
__A : List[str] = "gzip"
__A : Tuple = ".gz"
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : int = "lz4"
__A : int = "lz4"
__A : List[str] = ".lz4"
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : List[Any] = "xz"
__A : int = "xz"
__A : Optional[int] = ".xz"
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : int = "zstd"
__A : Optional[int] = "zstd"
__A : Dict = ".zst"
def __init__( self , __A , __A = "rb" , __A = None , __A = None , __A = DEFAULT_BLOCK_SIZE , **__A , ):
"""simple docstring"""
super().__init__(
fo=__A , mode=__A , target_protocol=__A , target_options=__A , block_size=__A , **__A , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
lowerCamelCase : Union[str, Any] = self.file.__enter__
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __A ):
"""simple docstring"""
lowerCamelCase : Any = file_
def __enter__( self ):
"""simple docstring"""
self._file.__enter__()
return self
def __exit__( self , *__A , **__A ):
"""simple docstring"""
self._file.__exit__(*__A , **__A )
def __iter__( self ):
"""simple docstring"""
return iter(self._file )
def _snake_case ( self ):
"""simple docstring"""
return next(self._file )
def __getattr__( self , __A ):
"""simple docstring"""
return getattr(self._file , __A )
def fixed_enter(*__A , **__A ):
return WrappedFile(_enter(*__A , **__A ) )
lowerCamelCase : Union[str, Any] = fixed_enter
| 283 |
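Filesystems like these are what let fsspec decompress transparently. The simplest equivalent from user code (the local path is illustrative):

import fsspec

# fsspec picks the decompressor by name and hands back a text stream.
with fsspec.open("./data.txt.gz", mode="rt", compression="gzip") as f:
    text = f.read()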
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_snake_case = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_snake_case = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 283 | 1 |
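A typical use of the exported class pairs any vision encoder with any autoregressive text decoder; the model ids below are illustrative:

from transformers import VisionEncoderDecoderModel

model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(
    "google/vit-base-patch16-224-in21k",  # vision encoder
    "gpt2",                               # text decoder (cross-attention added automatically)
)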
import math
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from .attention_processor import Attention
from .embeddings import get_timestep_embedding
from .modeling_utils import ModelMixin
class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase ):
'''simple docstring'''
@register_to_config
def __init__( self , __A = 128 , __A = 256 , __A = 2000.0 , __A = 768 , __A = 12 , __A = 12 , __A = 64 , __A = 2048 , __A = 0.1 , ):
"""simple docstring"""
super().__init__()
lowerCamelCase : List[str] = nn.Sequential(
nn.Linear(__A , d_model * 4 , bias=__A ) , nn.SiLU() , nn.Linear(d_model * 4 , d_model * 4 , bias=__A ) , nn.SiLU() , )
lowerCamelCase : Optional[Any] = nn.Embedding(__A , __A )
lowerCamelCase : Any = False
lowerCamelCase : Union[str, Any] = nn.Linear(__A , __A , bias=__A )
lowerCamelCase : int = nn.Dropout(p=__A )
lowerCamelCase : int = nn.ModuleList()
for lyr_num in range(__A ):
# FiLM conditional T5 decoder
lowerCamelCase : List[Any] = DecoderLayer(d_model=__A , d_kv=__A , num_heads=__A , d_ff=__A , dropout_rate=__A )
self.decoders.append(__A )
lowerCamelCase : Dict = TaLayerNorm(__A )
lowerCamelCase : Any = nn.Dropout(p=__A )
lowerCamelCase : Optional[int] = nn.Linear(__A , __A , bias=__A )
def _snake_case ( self , __A , __A ):
"""simple docstring"""
lowerCamelCase : int = torch.mul(query_input.unsqueeze(-1 ) , key_input.unsqueeze(-2 ) )
return mask.unsqueeze(-3 )
def _snake_case ( self , __A , __A , __A ):
"""simple docstring"""
lowerCamelCase , lowerCamelCase , lowerCamelCase : List[Any] = decoder_input_tokens.shape
assert decoder_noise_time.shape == (batch,)
# decoder_noise_time is in [0, 1), so rescale to expected timing range.
lowerCamelCase : List[Any] = get_timestep_embedding(
decoder_noise_time * self.config.max_decoder_noise_time , embedding_dim=self.config.d_model , max_period=self.config.max_decoder_noise_time , ).to(dtype=self.dtype )
lowerCamelCase : Union[str, Any] = self.conditioning_emb(__A ).unsqueeze(1 )
assert conditioning_emb.shape == (batch, 1, self.config.d_model * 4)
lowerCamelCase : int = decoder_input_tokens.shape[1]
# If we want to use relative positions for audio context, we can just offset
# this sequence by the length of encodings_and_masks.
lowerCamelCase : int = torch.broadcast_to(
torch.arange(__A , device=decoder_input_tokens.device ) , (batch, seq_length) , )
lowerCamelCase : str = self.position_encoding(__A )
lowerCamelCase : Tuple = self.continuous_inputs_projection(__A )
inputs += position_encodings
lowerCamelCase : List[str] = self.dropout(__A )
# decoder: No padding present.
lowerCamelCase : Optional[int] = torch.ones(
decoder_input_tokens.shape[:2] , device=decoder_input_tokens.device , dtype=inputs.dtype )
# Translate encoding masks to encoder-decoder masks.
lowerCamelCase : List[Any] = [(x, self.encoder_decoder_mask(__A , __A )) for x, y in encodings_and_masks]
# cross attend style: concat encodings
lowerCamelCase : Optional[Any] = torch.cat([x[0] for x in encodings_and_encdec_masks] , dim=1 )
lowerCamelCase : Union[str, Any] = torch.cat([x[1] for x in encodings_and_encdec_masks] , dim=-1 )
for lyr in self.decoders:
lowerCamelCase : List[str] = lyr(
__A , conditioning_emb=__A , encoder_hidden_states=__A , encoder_attention_mask=__A , )[0]
lowerCamelCase : Optional[int] = self.decoder_norm(__A )
lowerCamelCase : Any = self.post_dropout(__A )
lowerCamelCase : Optional[Any] = self.spec_out(__A )
return spec_out
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , __A , __A , __A , __A , __A , __A=1e-6 ):
"""simple docstring"""
super().__init__()
lowerCamelCase : Optional[int] = nn.ModuleList()
# cond self attention: layer 0
self.layer.append(
TaLayerSelfAttentionCond(d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A ) )
# cross attention: layer 1
self.layer.append(
TaLayerCrossAttention(
d_model=__A , d_kv=__A , num_heads=__A , dropout_rate=__A , layer_norm_epsilon=__A , ) )
# Film Cond MLP + dropout: last layer
self.layer.append(
TaLayerFFCond(d_model=__A , d_ff=__A , dropout_rate=__A , layer_norm_epsilon=__A ) )
def _snake_case ( self , __A , __A=None , __A=None , __A=None , __A=None , __A=None , ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.layer[0](
__A , conditioning_emb=__A , attention_mask=__A , )
if encoder_hidden_states is not None:
lowerCamelCase : List[Any] = torch.where(encoder_attention_mask > 0 , 0 , -1e10 ).to(
encoder_hidden_states.dtype )
lowerCamelCase : Optional[Any] = self.layer[1](
__A , key_value_states=__A , attention_mask=__A , )
# Apply Film Conditional Feed Forward layer
lowerCamelCase : List[str] = self.layer[-1](__A , __A )
return (hidden_states,)
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , __A , __A , __A , __A ):
"""simple docstring"""
super().__init__()
lowerCamelCase : Optional[Any] = TaLayerNorm(__A )
lowerCamelCase : Dict = TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
lowerCamelCase : str = Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
lowerCamelCase : Optional[Any] = nn.Dropout(__A )
def _snake_case ( self , __A , __A=None , __A=None , ):
"""simple docstring"""
lowerCamelCase : Tuple = self.layer_norm(__A )
if conditioning_emb is not None:
lowerCamelCase : Dict = self.FiLMLayer(__A , __A )
# Self-attention block
lowerCamelCase : Union[str, Any] = self.attention(__A )
lowerCamelCase : Dict = hidden_states + self.dropout(__A )
return hidden_states
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , __A , __A , __A , __A , __A ):
"""simple docstring"""
super().__init__()
lowerCamelCase : Union[str, Any] = Attention(query_dim=__A , heads=__A , dim_head=__A , out_bias=__A , scale_qk=__A )
lowerCamelCase : Tuple = TaLayerNorm(__A , eps=__A )
lowerCamelCase : Union[str, Any] = nn.Dropout(__A )
def _snake_case ( self , __A , __A=None , __A=None , ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.layer_norm(__A )
lowerCamelCase : Dict = self.attention(
__A , encoder_hidden_states=__A , attention_mask=attention_mask.squeeze(1 ) , )
lowerCamelCase : List[Any] = hidden_states + self.dropout(__A )
return layer_output
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , __A , __A , __A , __A ):
"""simple docstring"""
super().__init__()
lowerCamelCase : Any = TaDenseGatedActDense(d_model=__A , d_ff=__A , dropout_rate=__A )
lowerCamelCase : Optional[int] = TaFiLMLayer(in_features=d_model * 4 , out_features=__A )
lowerCamelCase : str = TaLayerNorm(__A , eps=__A )
lowerCamelCase : Union[str, Any] = nn.Dropout(__A )
def _snake_case ( self , __A , __A=None ):
"""simple docstring"""
lowerCamelCase : Any = self.layer_norm(__A )
if conditioning_emb is not None:
lowerCamelCase : str = self.film(__A , __A )
lowerCamelCase : Union[str, Any] = self.DenseReluDense(__A )
lowerCamelCase : Dict = hidden_states + self.dropout(__A )
return hidden_states
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , __A , __A , __A ):
"""simple docstring"""
super().__init__()
lowerCamelCase : Optional[Any] = nn.Linear(__A , __A , bias=__A )
lowerCamelCase : Optional[Any] = nn.Linear(__A , __A , bias=__A )
lowerCamelCase : Optional[int] = nn.Linear(__A , __A , bias=__A )
lowerCamelCase : Optional[Any] = nn.Dropout(__A )
lowerCamelCase : int = NewGELUActivation()
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : List[str] = self.act(self.wi_a(__A ) )
lowerCamelCase : int = self.wi_a(__A )
lowerCamelCase : List[Any] = hidden_gelu * hidden_linear
lowerCamelCase : Dict = self.dropout(__A )
lowerCamelCase : Tuple = self.wo(__A )
return hidden_states
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , __A , __A=1e-6 ):
"""simple docstring"""
super().__init__()
lowerCamelCase : Optional[int] = nn.Parameter(torch.ones(__A ) )
lowerCamelCase : Optional[Any] = eps
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Tuple = hidden_states.to(torch.floataa ).pow(2 ).mean(-1 , keepdim=__A )
lowerCamelCase : int = hidden_states * torch.rsqrt(variance + self.variance_epsilon )
# convert into half-precision if necessary
if self.weight.dtype in [torch.floataa, torch.bfloataa]:
lowerCamelCase : Any = hidden_states.to(self.weight.dtype )
return self.weight * hidden_states
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def _snake_case ( self , __A ):
"""simple docstring"""
return 0.5 * input * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi ) * (input + 0.044715 * torch.pow(__A , 3.0 )) ))
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self , __A , __A ):
"""simple docstring"""
super().__init__()
lowerCamelCase : List[Any] = nn.Linear(__A , out_features * 2 , bias=__A )
def _snake_case ( self , __A , __A ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = self.scale_bias(__A )
lowerCamelCase , lowerCamelCase : Optional[Any] = torch.chunk(__A , 2 , -1 )
lowerCamelCase : Dict = x * (1 + scale) + shift
return x
| 283 |
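The FiLM layer at the bottom is the conditioning mechanism the whole decoder hinges on: project an embedding to per-channel (scale, shift) and apply x * (1 + scale) + shift. A standalone sketch (dimensions are illustrative):

import torch
from torch import nn

class FiLM(nn.Module):
    def __init__(self, cond_dim: int, channels: int):
        super().__init__()
        self.proj = nn.Linear(cond_dim, channels * 2, bias=False)

    def forward(self, x: torch.Tensor, cond: torch.Tensor) -> torch.Tensor:
        scale, shift = self.proj(cond).chunk(2, dim=-1)   # per-channel modulation
        return x * (1 + scale) + shift

film = FiLM(cond_dim=512, channels=768)
y = film(torch.randn(2, 10, 768), torch.randn(2, 1, 512))  # broadcasts over the sequence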
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
_snake_case = argparse.ArgumentParser('''Stable Diffusion script with intel optimization''', add_help=False)
parser.add_argument('''--dpm''', action='''store_true''', help='''Enable DPMSolver or not''')
parser.add_argument('''--steps''', default=None, type=int, help='''Num inference steps''')
_snake_case = parser.parse_args()
_snake_case = '''cpu'''
_snake_case = '''a lovely <dicoo> in a red dress and hat, in the snowy and bright night, with many brightly lit buildings'''
_snake_case = '''path-to-your-trained-model'''
_snake_case = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
_snake_case = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
_snake_case = pipe.to(device)
# to channels last
_snake_case = pipe.unet.to(memory_format=torch.channels_last)
_snake_case = pipe.vae.to(memory_format=torch.channels_last)
_snake_case = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
_snake_case = pipe.safety_checker.to(memory_format=torch.channels_last)
# optimize with ipex
_snake_case = torch.randn(2, 4, 64, 64)
_snake_case = torch.rand(1) * 9_99
_snake_case = torch.randn(2, 77, 7_68)
_snake_case = (sample, timestep, encoder_hidden_status)
try:
_snake_case = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True, sample_input=input_example)
except Exception:
_snake_case = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloataa, inplace=True)
_snake_case = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloataa, inplace=True)
_snake_case = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloataa, inplace=True)
if pipe.requires_safety_checker:
_snake_case = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloataa, inplace=True)
# compute
_snake_case = 6_66
_snake_case = torch.Generator(device).manual_seed(seed)
_snake_case = {'''generator''': generator}
if args.steps is not None:
_snake_case = args.steps
with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloataa):
_snake_case = pipe(prompt, **generate_kwargs).images[0]
# save image
image.save('''generated.png''')
| 283 | 1 |
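The first optimization in the script is plain PyTorch, independent of IPEX: moving conv-heavy modules and their inputs to channels-last memory format. A torch-only sketch:

import torch

conv = torch.nn.Conv2d(4, 8, 3).to(memory_format=torch.channels_last)
x = torch.randn(1, 4, 64, 64).to(memory_format=torch.channels_last)
y = conv(x)   # same values, NHWC-strided tensors; a friendlier layout for CPU/oneDNN kernels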
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_snake_case = '''python tqdm regex requests packaging filelock numpy tokenizers'''.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('''dataclasses''')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('''importlib_metadata''')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=None ):
'''simple docstring'''
require_version(deps[pkg] , SCREAMING_SNAKE_CASE_ )
| 283 |
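A require_version-style check reduces to comparing an installed distribution against a PEP 440 specifier. A minimal sketch with importlib.metadata and packaging (not the actual transformers implementation):

from importlib.metadata import version
from packaging.specifiers import SpecifierSet

def require(pkg: str, spec: str) -> None:
    installed = version(pkg)
    if installed not in SpecifierSet(spec):   # SpecifierSet accepts version strings
        raise ImportError(f"{pkg}{spec} required, found {pkg}=={installed}")

require("packaging", ">=20.0")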
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __A , ):
"""simple docstring"""
lowerCamelCase : str = parent
lowerCamelCase : Union[str, Any] = 13
lowerCamelCase : Optional[Any] = 7
lowerCamelCase : List[str] = True
lowerCamelCase : Optional[int] = True
lowerCamelCase : Union[str, Any] = True
lowerCamelCase : List[Any] = True
lowerCamelCase : Tuple = True
lowerCamelCase : Any = False
lowerCamelCase : int = False
lowerCamelCase : Tuple = False
lowerCamelCase : Union[str, Any] = 2
lowerCamelCase : Dict = 99
lowerCamelCase : Tuple = 0
lowerCamelCase : Any = 32
lowerCamelCase : List[Any] = 2
lowerCamelCase : Tuple = 4
lowerCamelCase : List[str] = 0.1
lowerCamelCase : int = 0.1
lowerCamelCase : int = 512
lowerCamelCase : List[Any] = 16
lowerCamelCase : Any = 2
lowerCamelCase : Any = 0.02
lowerCamelCase : List[str] = 3
lowerCamelCase : Tuple = 4
lowerCamelCase : int = "last"
lowerCamelCase : int = True
lowerCamelCase : Dict = None
lowerCamelCase : Tuple = 0
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase : Tuple = random_attention_mask([self.batch_size, self.seq_length] , dtype=tf.floataa )
lowerCamelCase : Tuple = None
if self.use_input_lengths:
lowerCamelCase : Optional[Any] = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowerCamelCase : str = None
if self.use_token_type_ids:
lowerCamelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowerCamelCase : Dict = None
lowerCamelCase : Dict = None
lowerCamelCase : Tuple = None
if self.use_labels:
lowerCamelCase : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase : int = ids_tensor([self.batch_size] , 2 , dtype=tf.floataa )
lowerCamelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase : List[Any] = FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , bos_token_id=self.bos_token_id , )
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = TFFlaubertModel(config=__A )
lowerCamelCase : Any = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
lowerCamelCase : Dict = model(__A )
lowerCamelCase : Any = [input_ids, input_mask]
lowerCamelCase : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : int = TFFlaubertWithLMHeadModel(__A )
lowerCamelCase : List[str] = {"input_ids": input_ids, "lengths": input_lengths, "langs": token_type_ids}
lowerCamelCase : int = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = TFFlaubertForQuestionAnsweringSimple(__A )
lowerCamelCase : Optional[int] = {"input_ids": input_ids, "lengths": input_lengths}
lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Optional[int] = TFFlaubertForSequenceClassification(__A )
lowerCamelCase : str = {"input_ids": input_ids, "lengths": input_lengths}
lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Tuple = self.num_labels
lowerCamelCase : Optional[Any] = TFFlaubertForTokenClassification(config=__A )
lowerCamelCase : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , __A , __A , ):
"""simple docstring"""
lowerCamelCase : Any = self.num_choices
lowerCamelCase : Optional[Any] = TFFlaubertForMultipleChoice(config=__A )
lowerCamelCase : Tuple = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : int = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : List[str] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
lowerCamelCase : Optional[int] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
lowerCamelCase : Union[str, Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = self.prepare_config_and_inputs()
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase : Optional[Any] = config_and_inputs
lowerCamelCase : List[Any] = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"langs": token_type_ids,
"lengths": input_lengths,
}
return config, inputs_dict
@require_tf
class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
__A : str = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
__A : Dict = (
(TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
__A : Any = (
{
"feature-extraction": TFFlaubertModel,
"fill-mask": TFFlaubertWithLMHeadModel,
"question-answering": TFFlaubertForQuestionAnsweringSimple,
"text-classification": TFFlaubertForSequenceClassification,
"token-classification": TFFlaubertForTokenClassification,
"zero-shot": TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
__A : List[str] = False
__A : List[str] = False
def _snake_case ( self , __A , __A , __A , __A , __A ):
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("Fast" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = TFFlaubertModelTester(self )
lowerCamelCase : Optional[int] = ConfigTester(self , config_class=__A , emb_dim=37 )
def _snake_case ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_token_classification(*__A )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_for_multiple_choice(*__A )
@slow
def _snake_case ( self ):
"""simple docstring"""
for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase : int = TFFlaubertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_tf
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[int] = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased" )
lowerCamelCase : str = tf.convert_to_tensor(
[[0, 158, 735, 2592, 1424, 6727, 82, 1]] , dtype=tf.intaa , ) # "J'aime flaubert !"
lowerCamelCase : Dict = model(__A )[0]
lowerCamelCase : List[str] = tf.TensorShape((1, 8, 512) )
self.assertEqual(output.shape , __A )
# compare the actual values for a slice.
lowerCamelCase : Tuple = tf.convert_to_tensor(
[
[
[-1.8768773, -1.566555, 0.27072418],
[-1.6920038, -0.5873505, 1.9329599],
[-2.9563985, -1.6993835, 1.7972052],
]
] , dtype=tf.floataa , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
| 283 | 1 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=[] ):
'''simple docstring'''
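# Alpha mask for blending tiles: fully opaque in the centre, fading linearly to 0 over overlap_pixels at each edge, except the edges listed in remove_borders.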
lowerCamelCase : Optional[Any] = size[0] - overlap_pixels * 2
lowerCamelCase : int = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
lowerCamelCase : Tuple = np.ones((size_y, size_x) , dtype=np.uinta ) * 255
lowerCamelCase : List[Any] = np.pad(SCREAMING_SNAKE_CASE_ , mode="linear_ramp" , pad_width=SCREAMING_SNAKE_CASE_ , end_values=0 )
if "l" in remove_borders:
lowerCamelCase : Optional[Any] = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
lowerCamelCase : List[Any] = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
lowerCamelCase : List[Any] = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
lowerCamelCase : Tuple = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return max(SCREAMING_SNAKE_CASE_ , min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = list(SCREAMING_SNAKE_CASE_ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
lowerCamelCase : Any = clamp_rect(SCREAMING_SNAKE_CASE_ , [0, 0] , [image_size[0], image_size[1]] )
return rect
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Dict = Image.new("RGB" , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(SCREAMING_SNAKE_CASE_ , (original_slice, 0) )
return result
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Union[str, Any] = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
lowerCamelCase : int = tile.crop(SCREAMING_SNAKE_CASE_ )
return tile
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : int = n % d
return n - divisor
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
def __init__( self , __A , __A , __A , __A , __A , __A , __A = 350 , ):
"""simple docstring"""
super().__init__(
vae=__A , text_encoder=__A , tokenizer=__A , unet=__A , low_res_scheduler=__A , scheduler=__A , max_noise_level=__A , )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , **__A ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase : Tuple = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
lowerCamelCase : Union[str, Any] = add_overlap_rect(__A , __A , image.size )
lowerCamelCase : List[str] = image.crop(__A )
lowerCamelCase : Optional[int] = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
lowerCamelCase : int = translated_slice_x - (original_image_slice / 2)
lowerCamelCase : Optional[Any] = max(0 , __A )
lowerCamelCase : Tuple = squeeze_tile(__A , __A , __A , __A )
lowerCamelCase : Dict = to_input.size
lowerCamelCase : Optional[int] = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
lowerCamelCase : Dict = super(__A , self ).__call__(image=__A , **__A ).images[0]
lowerCamelCase : Tuple = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
lowerCamelCase : Optional[Any] = unsqueeze_tile(__A , __A )
lowerCamelCase : Optional[Any] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
lowerCamelCase : int = []
if x == 0:
remove_borders.append("l" )
elif crop_rect[2] == image.size[0]:
remove_borders.append("r" )
if y == 0:
remove_borders.append("t" )
elif crop_rect[3] == image.size[1]:
remove_borders.append("b" )
lowerCamelCase : int = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__A ) , mode="L" , )
final_image.paste(
__A , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __A )
@torch.no_grad()
def __call__( self , __A , __A , __A = 75 , __A = 9.0 , __A = 50 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = None , __A = 1 , __A = 128 , __A = 32 , __A = 32 , ):
"""simple docstring"""
lowerCamelCase : Dict = Image.new("RGB" , (image.size[0] * 4, image.size[1] * 4) )
lowerCamelCase : Union[str, Any] = math.ceil(image.size[0] / tile_size )
lowerCamelCase : Dict = math.ceil(image.size[1] / tile_size )
lowerCamelCase : str = tcx * tcy
lowerCamelCase : int = 0
for y in range(__A ):
for x in range(__A ):
self._process_tile(
__A , __A , __A , __A , __A , __A , __A , prompt=__A , num_inference_steps=__A , guidance_scale=__A , noise_level=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , latents=__A , )
current_count += 1
if callback is not None:
callback({"progress": current_count / total_tile_count, "image": final_image} )
return final_image
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : Dict = "stabilityai/stable-diffusion-x4-upscaler"
lowerCamelCase : Union[str, Any] = StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , revision="fp16" , torch_dtype=torch.floataa )
lowerCamelCase : Optional[Any] = pipe.to("cuda" )
lowerCamelCase : List[str] = Image.open("../../docs/source/imgs/diffusers_library.jpg" )
def callback(SCREAMING_SNAKE_CASE_ ):
print(f"""progress: {obj['progress']:.4f}""" )
obj["image"].save("diffusers_library_progress.jpg" )
lowerCamelCase : int = pipe(image=SCREAMING_SNAKE_CASE_ , prompt="Black font, white background, vector" , noise_level=40 , callback=SCREAMING_SNAKE_CASE_ )
final_image.save("diffusers_library.jpg" )
if __name__ == "__main__":
main()
| 283 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=[] ):
'''simple docstring'''
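# Alpha mask for blending tiles: fully opaque in the centre, fading linearly to 0 over overlap_pixels at each edge, except the edges listed in remove_borders.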
lowerCamelCase : Optional[Any] = size[0] - overlap_pixels * 2
lowerCamelCase : int = size[1] - overlap_pixels * 2
for letter in ["l", "r"]:
if letter in remove_borders:
size_x += overlap_pixels
for letter in ["t", "b"]:
if letter in remove_borders:
size_y += overlap_pixels
lowerCamelCase : Tuple = np.ones((size_y, size_x) , dtype=np.uinta ) * 255
lowerCamelCase : List[Any] = np.pad(SCREAMING_SNAKE_CASE_ , mode="linear_ramp" , pad_width=SCREAMING_SNAKE_CASE_ , end_values=0 )
if "l" in remove_borders:
lowerCamelCase : Optional[Any] = mask[:, overlap_pixels : mask.shape[1]]
if "r" in remove_borders:
lowerCamelCase : List[Any] = mask[:, 0 : mask.shape[1] - overlap_pixels]
if "t" in remove_borders:
lowerCamelCase : List[Any] = mask[overlap_pixels : mask.shape[0], :]
if "b" in remove_borders:
lowerCamelCase : Tuple = mask[0 : mask.shape[0] - overlap_pixels, :]
return mask
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return max(SCREAMING_SNAKE_CASE_ , min(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return (
clamp(rect[0] , min[0] , max[0] ),
clamp(rect[1] , min[1] , max[1] ),
clamp(rect[2] , min[0] , max[0] ),
clamp(rect[3] , min[1] , max[1] ),
)
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Optional[Any] = list(SCREAMING_SNAKE_CASE_ )
rect[0] -= overlap
rect[1] -= overlap
rect[2] += overlap
rect[3] += overlap
lowerCamelCase : Any = clamp_rect(SCREAMING_SNAKE_CASE_ , [0, 0] , [image_size[0], image_size[1]] )
return rect
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Dict = Image.new("RGB" , (tile.size[0] + original_slice, tile.size[1]) )
result.paste(
original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
(slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
result.paste(SCREAMING_SNAKE_CASE_ , (original_slice, 0) )
return result
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : Union[str, Any] = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
lowerCamelCase : int = tile.crop(SCREAMING_SNAKE_CASE_ )
return tile
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : int = n % d
return n - divisor
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
def __init__( self , __A , __A , __A , __A , __A , __A , __A = 350 , ):
"""simple docstring"""
super().__init__(
vae=__A , text_encoder=__A , tokenizer=__A , unet=__A , low_res_scheduler=__A , scheduler=__A , max_noise_level=__A , )
def _snake_case ( self , __A , __A , __A , __A , __A , __A , __A , **__A ):
"""simple docstring"""
torch.manual_seed(0 )
lowerCamelCase : Tuple = (
min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
min(image.size[0] , (x + 1) * tile_size ),
min(image.size[1] , (y + 1) * tile_size ),
)
lowerCamelCase : Union[str, Any] = add_overlap_rect(__A , __A , image.size )
lowerCamelCase : List[str] = image.crop(__A )
lowerCamelCase : Optional[int] = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
lowerCamelCase : int = translated_slice_x - (original_image_slice / 2)
lowerCamelCase : Optional[Any] = max(0 , __A )
lowerCamelCase : Tuple = squeeze_tile(__A , __A , __A , __A )
lowerCamelCase : Dict = to_input.size
lowerCamelCase : Optional[int] = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
lowerCamelCase : Dict = super(__A , self ).__call__(image=__A , **__A ).images[0]
lowerCamelCase : Tuple = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
lowerCamelCase : Optional[Any] = unsqueeze_tile(__A , __A )
lowerCamelCase : Optional[Any] = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
lowerCamelCase : int = []
if x == 0:
remove_borders.append("l" )
elif crop_rect[2] == image.size[0]:
remove_borders.append("r" )
if y == 0:
remove_borders.append("t" )
elif crop_rect[3] == image.size[1]:
remove_borders.append("b" )
lowerCamelCase : int = Image.fromarray(
make_transparency_mask(
(upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=__A ) , mode="L" , )
final_image.paste(
__A , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , __A )
@torch.no_grad()
def __call__( self , __A , __A , __A = 75 , __A = 9.0 , __A = 50 , __A = None , __A = 1 , __A = 0.0 , __A = None , __A = None , __A = None , __A = 1 , __A = 128 , __A = 32 , __A = 32 , ):
"""simple docstring"""
lowerCamelCase : Dict = Image.new("RGB" , (image.size[0] * 4, image.size[1] * 4) )
lowerCamelCase : Union[str, Any] = math.ceil(image.size[0] / tile_size )
lowerCamelCase : Dict = math.ceil(image.size[1] / tile_size )
lowerCamelCase : str = tcx * tcy
lowerCamelCase : int = 0
for y in range(__A ):
for x in range(__A ):
self._process_tile(
__A , __A , __A , __A , __A , __A , __A , prompt=__A , num_inference_steps=__A , guidance_scale=__A , noise_level=__A , negative_prompt=__A , num_images_per_prompt=__A , eta=__A , generator=__A , latents=__A , )
current_count += 1
if callback is not None:
callback({"progress": current_count / total_tile_count, "image": final_image} )
return final_image
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : Dict = "stabilityai/stable-diffusion-x4-upscaler"
lowerCamelCase : Union[str, Any] = StableDiffusionTiledUpscalePipeline.from_pretrained(SCREAMING_SNAKE_CASE_ , revision="fp16" , torch_dtype=torch.floataa )
lowerCamelCase : Optional[Any] = pipe.to("cuda" )
lowerCamelCase : List[str] = Image.open("../../docs/source/imgs/diffusers_library.jpg" )
def callback(SCREAMING_SNAKE_CASE_ ):
print(f"""progress: {obj['progress']:.4f}""" )
obj["image"].save("diffusers_library_progress.jpg" )
lowerCamelCase : int = pipe(image=SCREAMING_SNAKE_CASE_ , prompt="Black font, white background, vector" , noise_level=40 , callback=SCREAMING_SNAKE_CASE_ )
final_image.save("diffusers_library.jpg" )
if __name__ == "__main__":
main()
| 283 | 1 |
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 283 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_snake_case = logging.get_logger(__name__)
_snake_case = {
'''google/mobilenet_v2_1.4_224''': '''https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json''',
'''google/mobilenet_v2_1.0_224''': '''https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json''',
'''google/mobilenet_v2_0.75_160''': '''https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json''',
'''google/mobilenet_v2_0.35_96''': '''https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json''',
# See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Dict = "mobilenet_v2"
def __init__( self , __A=3 , __A=224 , __A=1.0 , __A=8 , __A=8 , __A=6 , __A=32 , __A=True , __A=True , __A="relu6" , __A=True , __A=0.8 , __A=0.02 , __A=0.001 , __A=255 , **__A , ):
"""simple docstring"""
super().__init__(**__A )
if depth_multiplier <= 0:
raise ValueError("depth_multiplier must be greater than zero." )
lowerCamelCase : str = num_channels
lowerCamelCase : Any = image_size
lowerCamelCase : Union[str, Any] = depth_multiplier
lowerCamelCase : Tuple = depth_divisible_by
lowerCamelCase : Dict = min_depth
lowerCamelCase : Dict = expand_ratio
lowerCamelCase : Optional[Any] = output_stride
lowerCamelCase : int = first_layer_is_expansion
lowerCamelCase : Union[str, Any] = finegrained_output
lowerCamelCase : Optional[Any] = hidden_act
lowerCamelCase : Optional[Any] = tf_padding
lowerCamelCase : Optional[Any] = classifier_dropout_prob
lowerCamelCase : Dict = initializer_range
lowerCamelCase : str = layer_norm_eps
lowerCamelCase : Optional[Any] = semantic_loss_ignore_index
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = version.parse("1.11" )
@property
def _snake_case ( self ):
"""simple docstring"""
return OrderedDict([("pixel_values", {0: "batch"})] )
@property
def _snake_case ( self ):
"""simple docstring"""
if self.task == "image-classification":
return OrderedDict([("logits", {0: "batch"})] )
else:
return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})] )
@property
def _snake_case ( self ):
"""simple docstring"""
return 1e-4
| 283 | 1 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UpperCAmelCase_ ( UpperCamelCase , UpperCamelCase , UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = [R"h\.\d+\.attn\.bias", R"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self , __A , __A , __A = None , __A = 5_0257 , __A = 1024 , __A = 768 , __A = 12 , __A = 12 , __A = None , __A = "gelu_new" , __A = 0.1 , __A = 0.1 , __A = 0.1 , __A = 1e-5 , __A = 0.02 , __A = True , __A = True , __A = False , __A = False , ):
"""simple docstring"""
super().__init__()
lowerCamelCase : Optional[Any] = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
F"""`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"""
F""" `n_embd`: {n_embd} are not equal.""" )
lowerCamelCase : List[str] = prefix_inner_dim
lowerCamelCase : Dict = prefix_hidden_dim
lowerCamelCase : List[str] = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
lowerCamelCase : int = (
nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity()
)
lowerCamelCase : Union[str, Any] = GPTaConfig(
vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , )
lowerCamelCase : int = GPTaLMHeadModel(__A )
def _snake_case ( self , __A , __A , __A = None , __A = None , ):
"""simple docstring"""
lowerCamelCase : Dict = self.transformer.transformer.wte(__A )
lowerCamelCase : List[Any] = self.encode_prefix(__A )
lowerCamelCase : str = self.decode_prefix(__A )
lowerCamelCase : List[str] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
lowerCamelCase : Union[str, Any] = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
lowerCamelCase : Dict = torch.cat((dummy_token, input_ids) , dim=1 )
lowerCamelCase : Any = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _snake_case ( self , __A , __A ):
"""simple docstring"""
return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A )
def _snake_case ( self , __A ):
"""simple docstring"""
return self.encode_prefix(__A )
@torch.no_grad()
def _snake_case ( self , __A , __A , __A ):
"""simple docstring"""
lowerCamelCase : Any = torch.split(__A , 1 , dim=0 )
lowerCamelCase : List[str] = []
lowerCamelCase : List[str] = []
for feature in features:
lowerCamelCase : Any = self.decode_prefix(feature.to(__A ) ) # back to the clip feature
# Only support beam search for now
lowerCamelCase , lowerCamelCase : Optional[int] = self.generate_beam(
input_embeds=__A , device=__A , eos_token_id=__A )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
lowerCamelCase : Optional[Any] = torch.stack(__A )
lowerCamelCase : int = torch.stack(__A )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _snake_case ( self , __A=None , __A=None , __A=None , __A = 5 , __A = 67 , __A = 1.0 , __A = None , ):
"""simple docstring"""
lowerCamelCase : Optional[int] = eos_token_id
lowerCamelCase : Optional[int] = None
lowerCamelCase : Optional[int] = None
lowerCamelCase : List[Any] = torch.ones(__A , device=__A , dtype=torch.int )
lowerCamelCase : int = torch.zeros(__A , device=__A , dtype=torch.bool )
if input_embeds is not None:
lowerCamelCase : Tuple = input_embeds
else:
lowerCamelCase : str = self.transformer.transformer.wte(__A )
for i in range(__A ):
lowerCamelCase : Union[str, Any] = self.transformer(inputs_embeds=__A )
lowerCamelCase : int = outputs.logits
lowerCamelCase : Optional[int] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
lowerCamelCase : List[str] = logits.softmax(-1 ).log()
if scores is None:
lowerCamelCase , lowerCamelCase : Any = logits.topk(__A , -1 )
lowerCamelCase : Tuple = generated.expand(__A , *generated.shape[1:] )
lowerCamelCase , lowerCamelCase : List[Any] = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
lowerCamelCase : Union[str, Any] = next_tokens
else:
lowerCamelCase : Tuple = tokens.expand(__A , *tokens.shape[1:] )
lowerCamelCase : Dict = torch.cat((tokens, next_tokens) , dim=1 )
else:
lowerCamelCase : Dict = -float(np.inf )
lowerCamelCase : Optional[Any] = 0
lowerCamelCase : Dict = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
lowerCamelCase : Optional[Any] = scores_sum / seq_lengths[:, None]
lowerCamelCase , lowerCamelCase : Any = scores_sum_average.view(-1 ).topk(__A , -1 )
lowerCamelCase : Optional[int] = next_tokens // scores_sum.shape[1]
lowerCamelCase : List[str] = seq_lengths[next_tokens_source]
lowerCamelCase : Optional[Any] = next_tokens % scores_sum.shape[1]
lowerCamelCase : Union[str, Any] = next_tokens.unsqueeze(1 )
lowerCamelCase : Union[str, Any] = tokens[next_tokens_source]
lowerCamelCase : Dict = torch.cat((tokens, next_tokens) , dim=1 )
lowerCamelCase : Union[str, Any] = generated[next_tokens_source]
lowerCamelCase : Any = scores_sum_average * seq_lengths
lowerCamelCase : Tuple = is_stopped[next_tokens_source]
lowerCamelCase : Tuple = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
lowerCamelCase : Any = torch.cat((generated, next_token_embed) , dim=1 )
lowerCamelCase : Any = is_stopped + next_tokens.eq(__A ).squeeze()
if is_stopped.all():
break
lowerCamelCase : str = scores / seq_lengths
lowerCamelCase : Optional[Any] = scores.argsort(descending=__A )
# tokens tensors are already padded to max_seq_length
lowerCamelCase : List[Any] = [tokens[i] for i in order]
lowerCamelCase : Union[str, Any] = torch.stack(__A , dim=0 )
lowerCamelCase : Tuple = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
| 283 |
from math import sqrt
import numpy as np
from sympy import symbols
# Coefficient
# Speed of light (m/s)
_snake_case = 2_99_79_24_58
# Symbols
_snake_case , _snake_case , _snake_case , _snake_case = symbols('''ct x y z''')
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if velocity > c:
raise ValueError("Speed must not exceed light speed 299,792,458 [m/s]!" )
elif velocity < 1:
# Usually the speed should be much higher than 1 (c order of magnitude)
raise ValueError("Speed must be greater than or equal to 1!" )
return velocity / c
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
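# Lorentz factor: gamma = 1 / sqrt(1 - beta(velocity) ** 2)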
return 1 / sqrt(1 - beta(SCREAMING_SNAKE_CASE_ ) ** 2 )
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return np.array(
[
[gamma(SCREAMING_SNAKE_CASE_ ), -gamma(SCREAMING_SNAKE_CASE_ ) * beta(SCREAMING_SNAKE_CASE_ ), 0, 0],
[-gamma(SCREAMING_SNAKE_CASE_ ) * beta(SCREAMING_SNAKE_CASE_ ), gamma(SCREAMING_SNAKE_CASE_ ), 0, 0],
[0, 0, 1, 0],
[0, 0, 0, 1],
] )
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = None ):
'''simple docstring'''
if event is None:
lowerCamelCase : Tuple = np.array([ct, x, y, z] ) # Symbolic four vector
else:
event[0] *= c # x0 is ct (speed of light * time)
return transformation_matrix(SCREAMING_SNAKE_CASE_ ) @ event
if __name__ == "__main__":
import doctest
doctest.testmod()
# Example of symbolic vector:
_snake_case = transform(29_97_92_45)
print('''Example of four vector: ''')
print(f'''ct\' = {four_vector[0]}''')
print(f'''x\' = {four_vector[1]}''')
print(f'''y\' = {four_vector[2]}''')
print(f'''z\' = {four_vector[3]}''')
# Substitute symbols with numerical values
_snake_case = {ct: c, x: 1, y: 1, z: 1}
_snake_case = [four_vector[i].subs(sub_dict) for i in range(4)]
print(f'''\n{numerical_vector}''')
| 283 | 1 |
import argparse
import requests
import torch
from PIL import Image
from transformers import SwinConfig, SwinForMaskedImageModeling, ViTImageProcessor
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : str = SwinConfig(image_size=192 )
if "base" in model_name:
lowerCamelCase : Optional[Any] = 6
lowerCamelCase : List[str] = 128
lowerCamelCase : str = (2, 2, 18, 2)
lowerCamelCase : Optional[Any] = (4, 8, 16, 32)
elif "large" in model_name:
lowerCamelCase : List[str] = 12
lowerCamelCase : Optional[Any] = 192
lowerCamelCase : int = (2, 2, 18, 2)
lowerCamelCase : str = (6, 12, 24, 48)
else:
raise ValueError("Model not supported, only supports base and large variants" )
lowerCamelCase : str = window_size
lowerCamelCase : Union[str, Any] = embed_dim
lowerCamelCase : str = depths
lowerCamelCase : Dict = num_heads
return config
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
if "encoder.mask_token" in name:
lowerCamelCase : Union[str, Any] = name.replace("encoder.mask_token" , "embeddings.mask_token" )
if "encoder.patch_embed.proj" in name:
lowerCamelCase : Any = name.replace("encoder.patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "encoder.patch_embed.norm" in name:
lowerCamelCase : Union[str, Any] = name.replace("encoder.patch_embed.norm" , "embeddings.norm" )
if "attn.proj" in name:
lowerCamelCase : List[str] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
lowerCamelCase : str = name.replace("attn" , "attention.self" )
if "norm1" in name:
lowerCamelCase : Any = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
lowerCamelCase : Optional[int] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
lowerCamelCase : List[str] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
lowerCamelCase : List[Any] = name.replace("mlp.fc2" , "output.dense" )
if name == "encoder.norm.weight":
lowerCamelCase : Optional[int] = "layernorm.weight"
if name == "encoder.norm.bias":
lowerCamelCase : int = "layernorm.bias"
if "decoder" in name:
pass
else:
lowerCamelCase : int = "swin." + name
return name
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCamelCase : Dict = orig_state_dict.pop(SCREAMING_SNAKE_CASE_ )
if "attn_mask" in key:
pass
elif "qkv" in key:
lowerCamelCase : Optional[Any] = key.split("." )
lowerCamelCase : Union[str, Any] = int(key_split[2] )
lowerCamelCase : List[Any] = int(key_split[4] )
lowerCamelCase : Union[str, Any] = model.swin.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
lowerCamelCase : Union[str, Any] = val[:dim, :]
lowerCamelCase : Optional[Any] = val[
dim : dim * 2, :
]
lowerCamelCase : Any = val[-dim:, :]
else:
lowerCamelCase : List[Any] = val[
:dim
]
lowerCamelCase : List[str] = val[
dim : dim * 2
]
lowerCamelCase : Union[str, Any] = val[
-dim:
]
else:
lowerCamelCase : str = val
return orig_state_dict
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
lowerCamelCase : int = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )["model"]
lowerCamelCase : Optional[int] = get_swin_config(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : int = SwinForMaskedImageModeling(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCamelCase : List[str] = convert_state_dict(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
model.load_state_dict(SCREAMING_SNAKE_CASE_ )
lowerCamelCase : str = "http://images.cocodataset.org/val2017/000000039769.jpg"
lowerCamelCase : List[Any] = ViTImageProcessor(size={"height": 192, "width": 192} )
lowerCamelCase : str = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
lowerCamelCase : Any = image_processor(images=SCREAMING_SNAKE_CASE_ , return_tensors="pt" )
with torch.no_grad():
lowerCamelCase : Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ ).logits
print(outputs.keys() )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
if push_to_hub:
print(f"""Pushing model and image processor for {model_name} to hub""" )
model.push_to_hub(f"""microsoft/{model_name}""" )
image_processor.push_to_hub(f"""microsoft/{model_name}""" )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''swin-base-simmim-window6-192''',
type=str,
choices=['''swin-base-simmim-window6-192''', '''swin-large-simmim-window12-192'''],
help='''Name of the Swin SimMIM model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''',
default='''/Users/nielsrogge/Documents/SwinSimMIM/simmim_pretrain__swin_base__img192_window6__100ep.pth''',
type=str,
help='''Path to the original PyTorch checkpoint (.pth file).''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
_snake_case = parser.parse_args()
convert_swin_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub)
| 283 |
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
def __init__( self , *__A , **__A ):
"""simple docstring"""
warnings.warn(
"The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use DPTImageProcessor instead." , __A , )
super().__init__(*__A , **__A )
| 283 | 1 |
import numpy
class UpperCAmelCase_ :
'''simple docstring'''
def __init__( self , __A , __A ):
"""simple docstring"""
lowerCamelCase : Dict = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCamelCase : Optional[int] = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCamelCase : Tuple = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCamelCase : Optional[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCamelCase : Any = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCamelCase : Dict = numpy.zeros(output_array.shape )
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Optional[Any] = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCamelCase : List[str] = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCamelCase : Tuple = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def _snake_case ( self ):
"""simple docstring"""
lowerCamelCase : Dict = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCamelCase : Union[str, Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCamelCase : Optional[Any] = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def _snake_case ( self , __A , __A , __A ):
"""simple docstring"""
for iteration in range(1 , iterations + 1 ):
lowerCamelCase : List[Any] = self.feedforward()
self.back_propagation()
if give_loss:
lowerCamelCase : Optional[Any] = numpy.mean(numpy.square(output - self.feedforward() ) )
print(F"""Iteration {iteration} Loss: {loss}""" )
def _snake_case ( self , __A ):
"""simple docstring"""
lowerCamelCase : Optional[int] = input_arr
lowerCamelCase : Optional[Any] = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCamelCase : str = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCamelCase : Dict = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return 1 / (1 + numpy.exp(-value ))
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
return (value) * (1 - (value))
def lowercase_( ):
'''simple docstring'''
lowerCamelCase : Any = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCamelCase : Tuple = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCamelCase : Any = TwoHiddenLayerNeuralNetwork(
input_array=SCREAMING_SNAKE_CASE_ , output_array=SCREAMING_SNAKE_CASE_ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=SCREAMING_SNAKE_CASE_ , iterations=10 , give_loss=SCREAMING_SNAKE_CASE_ )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example()
| 283 |
import argparse
_snake_case = '''docs/source/_static/js/custom.js'''
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , encoding="utf-8" , newline="\n" ) as f:
lowerCamelCase : List[str] = f.readlines()
lowerCamelCase : int = 0
# First let's put the right version
while not lines[index].startswith("const stableVersion =" ):
index += 1
lowerCamelCase : str = f"""const stableVersion = \"v{version}\"\n"""
# Then update the dictionary
while not lines[index].startswith("const versionMapping = {" ):
index += 1
# We go until the end
while not lines[index].startswith("}" ):
index += 1
# We add the new version at the end
lines[index - 1] += f""" \"v{version}\": \"v{version}\",\n"""
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
_snake_case = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
_snake_case = parser.parse_args()
update_custom_js(args.version)
| 283 | 1 |
def lowercase_( SCREAMING_SNAKE_CASE_ = 4000000 ):
'''simple docstring'''
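# Project Euler problem 2: sum the even-valued Fibonacci terms that do not exceed n.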
lowerCamelCase : Any = [0, 1]
lowerCamelCase : Union[str, Any] = 0
while fib[i] <= n:
fib.append(fib[i] + fib[i + 1] )
if fib[i + 2] > n:
break
i += 1
lowerCamelCase : Union[str, Any] = 0
for j in range(len(SCREAMING_SNAKE_CASE_ ) - 1 ):
if fib[j] % 2 == 0:
total += fib[j]
return total
if __name__ == "__main__":
print(f'''{solution() = }''')
| 283 |
from ..utils import DummyObject, requires_backends
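# Dummy placeholders that raise an informative error via requires_backends when Flax is not installed.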
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Any = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : str = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : List[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Dict = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Union[str, Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[int] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Any = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : Optional[Any] = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
class UpperCAmelCase_ ( metaclass=UpperCamelCase ):
'''simple docstring'''
__A : int = ["flax"]
def __init__( self , *__A , **__A ):
"""simple docstring"""
requires_backends(self , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
@classmethod
def _snake_case ( cls , *__A , **__A ):
"""simple docstring"""
requires_backends(cls , ["flax"] )
| 283 | 1 |
import warnings
from functools import wraps
from typing import Callable
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
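# Decorator that emits a warning flagging the wrapped function as experimental, then delegates the call unchanged.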
@wraps(SCREAMING_SNAKE_CASE_ )
def _inner_fn(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
warnings.warn(
(f"""'{fn.__name__}' is experimental and might be subject to breaking changes in the future.""") , SCREAMING_SNAKE_CASE_ , )
return fn(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
return _inner_fn
| 283 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCAmelCase_ ( UpperCamelCase ):
'''simple docstring'''
__A : Dict = "openai/whisper-base"
__A : str = (
"This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the "
"transcribed text."
)
__A : Any = "transcriber"
__A : Any = WhisperProcessor
__A : int = WhisperForConditionalGeneration
__A : Any = ["audio"]
__A : List[str] = ["text"]
def _snake_case ( self , __A ):
"""simple docstring"""
return self.pre_processor(__A , return_tensors="pt" ).input_features
def _snake_case ( self , __A ):
"""simple docstring"""
return self.model.generate(inputs=__A )
def _snake_case ( self , __A ):
"""simple docstring"""
return self.pre_processor.batch_decode(__A , skip_special_tokens=__A )[0]
| 283 | 1 |
import argparse
import os
import re
import packaging.version
_snake_case = '''examples/'''
_snake_case = {
'''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''),
'''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
_snake_case = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
_snake_case = '''README.md'''
def lowercase_( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
with open(SCREAMING_SNAKE_CASE_ , "r" , encoding="utf-8" , newline="\n" ) as f:
lowerCamelCase : int = f.read()
lowerCamelCase , lowerCamelCase : Union[str, Any] = REPLACE_PATTERNS[pattern]
lowerCamelCase : Union[str, Any] = replace.replace("VERSION" , SCREAMING_SNAKE_CASE_ )
lowerCamelCase : str = re_pattern.sub(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(SCREAMING_SNAKE_CASE_ )
def lowercase_( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
for folder, directories, fnames in os.walk(SCREAMING_SNAKE_CASE_ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version.
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user that we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
| 283 |
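The release script above calls an `update_version_in_file` helper and reads the `REPLACE_FILES`/`REPLACE_PATTERNS` tables, all of which are defined earlier in the same file, outside this excerpt. A minimal sketch of what that helper could look like, assuming each `REPLACE_PATTERNS` entry is a `(compiled_regex, replacement_template)` pair whose template contains a literal `VERSION` placeholder:

import re

def update_version_in_file(fname, version, pattern):
    # Sketch only: the real helper lives earlier in this script; the
    # (regex, template) layout of REPLACE_PATTERNS is an assumption here.
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    # Substitute the concrete version into the replacement template, then apply it.
    code = re_pattern.sub(replace.replace("VERSION", version), code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)

Assuming the script is saved as release.py, it would be run as `python release.py` before a release (add `--patch` for a patch release) and `python release.py --post_release` afterwards to move the repository back to a dev version.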
import argparse

import torch

from transformers import YosoConfig, YosoForMaskedLM


def rename_key(orig_key):
    """Map a key from the original YOSO checkpoint to its Transformers equivalent."""
    if "model" in orig_key:
        orig_key = orig_key.replace("model.", "")
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1", "attention.output.LayerNorm")
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2", "output.LayerNorm")
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm", "LayerNorm")
    if "transformer" in orig_key:
        layer_num = orig_key.split(".")[0].split("_")[-1]
        orig_key = orig_key.replace(f"transformer_{layer_num}", f"encoder.layer.{layer_num}")
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn", "attention.self")
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha", "attention")
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q", "self.query")
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k", "self.key")
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v", "self.value")
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1", "intermediate.dense")
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2", "output.dense")
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff", "output.dense")
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class", "cls.predictions.decoder")
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm", "cls.predictions.transform")
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key
    return orig_key


def convert_checkpoint_helper(max_position_embeddings, orig_state_dict):
    """Rename all keys, drop the unused pooler/classifier heads and add missing entries."""
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key)] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings).expand((1, -1)) + 2

    return orig_state_dict


def convert_yoso_checkpoint(checkpoint_path, yoso_config_file, pytorch_dump_path):
    orig_state_dict = torch.load(checkpoint_path, map_location="cpu")["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file)
    model = YosoForMaskedLM(config)

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings, orig_state_dict)

    print(model.load_state_dict(new_state_dict))
    model.eval()
    model.save_pretrained(pytorch_dump_path)

    print(f"Checkpoint successfully converted. Model saved at {pytorch_dump_path}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The json file for YOSO model config.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 283 | 1 |
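As a quick sanity check on the key mapping above, here is a typical encoder weight traced through rename_key (the key name is illustrative, not taken from a real checkpoint):

# "model.transformer_0.mha.W_q.weight"
#   -> strip "model."             : "transformer_0.mha.W_q.weight"
#   -> "transformer_0" renamed    : "encoder.layer.0.mha.W_q.weight"
#   -> "mha" -> "attention"       : "encoder.layer.0.attention.W_q.weight"
#   -> "W_q" -> "self.query"      : "encoder.layer.0.attention.self.query.weight"
#   -> no "cls" substring, so the "yoso." prefix is added:
assert rename_key("model.transformer_0.mha.W_q.weight") == "yoso.encoder.layer.0.attention.self.query.weight"

The script itself would then be invoked along these lines (the file name is an assumption): `python convert_yoso_checkpoint.py --pytorch_model_path model.pt --config_file config.json --pytorch_dump_path ./yoso-converted`.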
import colorsys

from PIL import Image  # type: ignore


def get_distance(x, y, max_step):
    """
    Return the relative distance (= step/max_step) after which the complex number
    made of this x-y-pair diverges. Members of the Mandelbrot set do not diverge,
    so their distance is 1.
    """
    a = x
    b = y
    # iterate z -> z**2 + c with z = a + b*i and c = x + y*i;
    # (a + b*i)**2 = (a*a - b*b) + (2*a*b)*i
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex numbers with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance):
    """Black-and-white coloring: the Mandelbrot set is black, everything else is white."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance):
    """Color-coding that takes the relative distance into account; the set itself is black."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width=800,
    image_height=600,
    figure_center_x=-0.6,
    figure_center_y=0,
    figure_width=3.2,
    max_step=50,
    use_distance_color_coding=True,
):
    """Render the Mandelbrot set as a PIL image."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
| 283 |
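A small usage sketch for the renderer above; the reduced resolution and the output file name are illustrative choices to keep the run fast, not values from the original script:

# render a quick low-resolution preview instead of the full 800x600 figure
preview = get_image(image_width=160, image_height=120, max_step=30)
preview.save("mandelbrot_preview.png")  # file name is an arbitrary choice

Lowering max_step coarsens the escape-time estimate (fewer distinct color bands) but cuts the per-pixel cost proportionally, which is why it pairs well with a smaller image for previews.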
import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # masked mean-pooling over the token dimension
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs
| 283 | 1 |
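A minimal shape check for the model above; every size here is an illustrative assumption chosen small enough to run on CPU, not a real M-CLIP configuration:

# tiny illustrative config: transformerDimSize must match hidden_size,
# since the linear head projects the mean-pooled transformer output
config = MCLIPConfig(
    transformerDimSize=64,
    imageDimSize=32,
    hidden_size=64,
    num_hidden_layers=2,
    num_attention_heads=4,
    intermediate_size=128,
    vocab_size=1000,
)
model = MultilingualCLIP(config)

input_ids = torch.randint(0, config.vocab_size, (2, 16))
attention_mask = torch.ones(2, 16)

projected, token_embs = model(input_ids, attention_mask)
print(projected.shape)   # torch.Size([2, 32])    - pooled text embedding in the image space
print(token_embs.shape)  # torch.Size([2, 16, 64]) - per-token transformer states

The mean-pooling in forward divides by attention_mask.sum(dim=1), so padded positions contribute nothing to the sentence embedding while the returned token_embs keep the full per-token states.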