Dataset schema (five columns per row; string lengths and integer ranges as observed):
    code                     string   (length 81 to 54k)
    code_codestyle           int64    (0 to 721)
    style_context            string   (length 91 to 41.9k)
    style_context_codestyle  int64    (0 to 699)
    label                    int64    (0 to 1)
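The rows below follow this schema. As a minimal loading sketch (assuming the dump comes from a Hugging Face datasets-style parquet export; the file name data.parquet and the pandas-based approach are illustrative assumptions, not part of this dump):

import pandas as pd

# Hypothetical file name -- not taken from this dump.
df = pd.read_parquet("data.parquet")
# Expected columns: code, code_codestyle, style_context, style_context_codestyle, label
print(df.dtypes)
for _, row in df.head(3).iterrows():
    # Each row pairs a code sample with a style-context sample, two style IDs, and a binary label.
    print(len(row["code"]), row["code_codestyle"], row["style_context_codestyle"], row["label"])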
"""simple docstring""" import os from pathlib import Path import numpy as np import pytest from pack_dataset import pack_data_dir from parameterized import parameterized from save_len_file import save_len_file from torch.utils.data import DataLoader from transformers import AutoTokenizer from transformers.models.mbart.modeling_mbart import shift_tokens_right from transformers.testing_utils import TestCasePlus, slow from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset lowerCAmelCase_ = '''bert-base-cased''' lowerCAmelCase_ = '''google/pegasus-xsum''' lowerCAmelCase_ = [''' Sam ate lunch today.''', '''Sams lunch ingredients.'''] lowerCAmelCase_ = ['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee'''] lowerCAmelCase_ = '''patrickvonplaten/t5-tiny-random''' lowerCAmelCase_ = '''sshleifer/bart-tiny-random''' lowerCAmelCase_ = '''sshleifer/tiny-mbart''' lowerCAmelCase_ = '''sshleifer/tiny-marian-en-de''' def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : List[str] = "\n".join(__SCREAMING_SNAKE_CASE ) Path(__SCREAMING_SNAKE_CASE ).open("""w""" ).writelines(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: for split in ["train", "val", "test"]: _dump_articles(os.path.join(__SCREAMING_SNAKE_CASE , F"""{split}.source""" ) , __SCREAMING_SNAKE_CASE ) _dump_articles(os.path.join(__SCREAMING_SNAKE_CASE , F"""{split}.target""" ) , __SCREAMING_SNAKE_CASE ) return tmp_dir class _snake_case ( snake_case__ ): """simple docstring""" @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) @slow def _lowerCAmelCase ( self : Tuple , _A : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = AutoTokenizer.from_pretrained(lowercase_) _SCREAMING_SNAKE_CASE : Tuple = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) _SCREAMING_SNAKE_CASE : List[Any] = max(len(tokenizer.encode(lowercase_)) for a in ARTICLES) _SCREAMING_SNAKE_CASE : Union[str, Any] = max(len(tokenizer.encode(lowercase_)) for a in SUMMARIES) _SCREAMING_SNAKE_CASE : Union[str, Any] = 4 _SCREAMING_SNAKE_CASE : Union[str, Any] = 8 assert max_len_target > max_src_len # Will be truncated assert max_len_source > max_src_len # Will be truncated _SCREAMING_SNAKE_CASE : Optional[int] = "ro_RO", "de_DE" # ignored for all but mbart, but never causes error. _SCREAMING_SNAKE_CASE : Dict = SeqaSeqDataset( lowercase_ , data_dir=lowercase_ , type_path="""train""" , max_source_length=lowercase_ , max_target_length=lowercase_ , src_lang=lowercase_ , tgt_lang=lowercase_ , ) _SCREAMING_SNAKE_CASE : List[Any] = DataLoader(lowercase_ , batch_size=2 , collate_fn=train_dataset.collate_fn) for batch in dataloader: assert isinstance(lowercase_ , lowercase_) assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. 
assert batch["input_ids"].shape[1] == max_src_len # show that targets are the same len assert batch["labels"].shape[1] == max_tgt_len if tok_name != MBART_TINY: continue # check language codes in correct place _SCREAMING_SNAKE_CASE : List[str] = shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id) assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang] assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang] break # No need to test every batch @parameterized.expand([BART_TINY, BERT_BASE_CASED]) def _lowerCAmelCase ( self : Tuple , _A : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained(lowercase_) _SCREAMING_SNAKE_CASE : int = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) _SCREAMING_SNAKE_CASE : Tuple = max(len(tokenizer.encode(lowercase_)) for a in ARTICLES) _SCREAMING_SNAKE_CASE : List[str] = max(len(tokenizer.encode(lowercase_)) for a in SUMMARIES) _SCREAMING_SNAKE_CASE : int = 4 _SCREAMING_SNAKE_CASE : Tuple = LegacySeqaSeqDataset( lowercase_ , data_dir=lowercase_ , type_path="""train""" , max_source_length=2_0 , max_target_length=lowercase_ , ) _SCREAMING_SNAKE_CASE : Dict = DataLoader(lowercase_ , batch_size=2 , collate_fn=train_dataset.collate_fn) for batch in dataloader: assert batch["attention_mask"].shape == batch["input_ids"].shape # show that articles were trimmed. assert batch["input_ids"].shape[1] == max_len_source assert 2_0 >= batch["input_ids"].shape[1] # trimmed significantly # show that targets were truncated assert batch["labels"].shape[1] == trunc_target # Truncated assert max_len_target > trunc_target # Truncated break # No need to test every batch def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""") _SCREAMING_SNAKE_CASE : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())) _SCREAMING_SNAKE_CASE : int = tmp_dir.joinpath("""train.source""").open().readlines() _SCREAMING_SNAKE_CASE : Any = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir())) pack_data_dir(lowercase_ , lowercase_ , 1_2_8 , lowercase_) _SCREAMING_SNAKE_CASE : Optional[Any] = {x.name for x in tmp_dir.iterdir()} _SCREAMING_SNAKE_CASE : List[str] = {x.name for x in save_dir.iterdir()} _SCREAMING_SNAKE_CASE : Optional[int] = save_dir.joinpath("""train.source""").open().readlines() # orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.'] # desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.'] assert len(lowercase_) < len(lowercase_) assert len(lowercase_) == 1 assert len(packed_examples[0]) == sum(len(lowercase_) for x in orig_examples) assert orig_paths == new_paths @pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""") def _lowerCAmelCase ( self : str): """simple docstring""" if not FAIRSEQ_AVAILABLE: return _SCREAMING_SNAKE_CASE : Dict = self._get_dataset(max_len=6_4) _SCREAMING_SNAKE_CASE : Union[str, Any] = 6_4 _SCREAMING_SNAKE_CASE : str = ds.make_dynamic_sampler(lowercase_ , required_batch_size_multiple=lowercase_) _SCREAMING_SNAKE_CASE : int = [len(lowercase_) for x in batch_sampler] assert len(set(lowercase_)) > 1 # it's not dynamic batch size if every batch is the same length assert sum(lowercase_) == len(lowercase_) # no dropped or added examples 
_SCREAMING_SNAKE_CASE : int = DataLoader(lowercase_ , batch_sampler=lowercase_ , collate_fn=ds.collate_fn , num_workers=2) _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : Dict = [] for batch in data_loader: _SCREAMING_SNAKE_CASE : Union[str, Any] = batch["input_ids"].shape _SCREAMING_SNAKE_CASE : Any = src_shape[0] assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple _SCREAMING_SNAKE_CASE : Dict = np.product(batch["""input_ids"""].shape) num_src_per_batch.append(lowercase_) if num_src_tokens > (max_tokens * 1.1): failures.append(lowercase_) assert num_src_per_batch[0] == max(lowercase_) if failures: raise AssertionError(f"""too many tokens in {len(lowercase_)} batches""") def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self._get_dataset(max_len=5_1_2) _SCREAMING_SNAKE_CASE : str = 2 _SCREAMING_SNAKE_CASE : Dict = ds.make_sortish_sampler(lowercase_ , shuffle=lowercase_) _SCREAMING_SNAKE_CASE : Optional[int] = DataLoader(lowercase_ , batch_size=lowercase_ , collate_fn=ds.collate_fn , num_workers=2) _SCREAMING_SNAKE_CASE : List[str] = DataLoader(lowercase_ , batch_size=lowercase_ , collate_fn=ds.collate_fn , num_workers=2 , sampler=lowercase_) _SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.pad_token_id def count_pad_tokens(_A : int , _A : Optional[int]="input_ids"): return [batch[k].eq(lowercase_).sum().item() for batch in data_loader] assert sum(count_pad_tokens(lowercase_ , k="""labels""")) < sum(count_pad_tokens(lowercase_ , k="""labels""")) assert sum(count_pad_tokens(lowercase_)) < sum(count_pad_tokens(lowercase_)) assert len(lowercase_) == len(lowercase_) def _lowerCAmelCase ( self : Tuple , _A : str=1_0_0_0 , _A : List[Any]=1_2_8): """simple docstring""" if os.getenv("""USE_REAL_DATA""" , lowercase_): _SCREAMING_SNAKE_CASE : List[Any] = "examples/seq2seq/wmt_en_ro" _SCREAMING_SNAKE_CASE : Tuple = max_len * 2 * 6_4 if not Path(lowercase_).joinpath("""train.len""").exists(): save_len_file(lowercase_ , lowercase_) else: _SCREAMING_SNAKE_CASE : Tuple = "examples/seq2seq/test_data/wmt_en_ro" _SCREAMING_SNAKE_CASE : str = max_len * 4 save_len_file(lowercase_ , lowercase_) _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained(lowercase_) _SCREAMING_SNAKE_CASE : Any = SeqaSeqDataset( lowercase_ , data_dir=lowercase_ , type_path="""train""" , max_source_length=lowercase_ , max_target_length=lowercase_ , n_obs=lowercase_ , ) return ds, max_tokens, tokenizer def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = self._get_dataset() _SCREAMING_SNAKE_CASE : Optional[int] = set(DistributedSortishSampler(lowercase_ , 2_5_6 , num_replicas=2 , rank=0 , add_extra_examples=lowercase_)) _SCREAMING_SNAKE_CASE : List[str] = set(DistributedSortishSampler(lowercase_ , 2_5_6 , num_replicas=2 , rank=1 , add_extra_examples=lowercase_)) assert idsa.intersection(lowercase_) == set() @parameterized.expand( [ MBART_TINY, MARIAN_TINY, T5_TINY, BART_TINY, PEGASUS_XSUM, ] , ) def _lowerCAmelCase ( self : List[str] , _A : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained(lowercase_ , use_fast=lowercase_) if tok_name == MBART_TINY: _SCREAMING_SNAKE_CASE : Dict = SeqaSeqDataset( lowercase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , ) _SCREAMING_SNAKE_CASE : Dict = train_dataset.dataset_kwargs assert "src_lang" 
in kwargs and "tgt_lang" in kwargs else: _SCREAMING_SNAKE_CASE : Any = SeqaSeqDataset( lowercase_ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir()) , type_path="""train""" , max_source_length=4 , max_target_length=8 , ) _SCREAMING_SNAKE_CASE : Tuple = train_dataset.dataset_kwargs assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs assert len(lowercase_) == 1 if tok_name == BART_TINY else len(lowercase_) == 0
code_codestyle: 704
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''', '''UniSpeechForCTC''', '''UniSpeechForPreTraining''', '''UniSpeechForSequenceClassification''', '''UniSpeechModel''', '''UniSpeechPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
style_context_codestyle: 635
label: 0
"""simple docstring""" import os import sys import unittest lowerCAmelCase_ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) sys.path.append(os.path.join(git_repo_path, '''utils''')) import check_dummies # noqa: E402 from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402 # Align TRANSFORMERS_PATH in check_dummies with the current path lowerCAmelCase_ = os.path.join(git_repo_path, '''src''', '''transformers''') lowerCAmelCase_ = "\n{0} = None\n" lowerCAmelCase_ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n" lowerCAmelCase_ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n" class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = find_backend(""" _import_structure[\"models.albert\"].append(\"AlbertTokenizerFast\")""") self.assertIsNone(A_) _SCREAMING_SNAKE_CASE : Optional[int] = find_backend(""" if not is_tokenizers_available():""") self.assertEqual(A_ , """tokenizers""") _SCREAMING_SNAKE_CASE : Optional[Any] = find_backend(""" if not is_tensorflow_text_available():""") self.assertEqual(A_ , """tensorflow_text""") _SCREAMING_SNAKE_CASE : Union[str, Any] = find_backend(""" if not (is_sentencepiece_available() and is_tokenizers_available()):""") self.assertEqual(A_ , """sentencepiece_and_tokenizers""") _SCREAMING_SNAKE_CASE : int = find_backend( """ if not (is_sentencepiece_available() and is_tensorflow_text_available()):""") self.assertEqual(A_ , """sentencepiece_and_tensorflow_text""") _SCREAMING_SNAKE_CASE : List[Any] = find_backend( """ if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):""") self.assertEqual(A_ , """sentencepiece_and_tokenizers_and_vision""") def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = read_init() # We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects self.assertIn("""torch""" , A_) self.assertIn("""tensorflow_text""" , A_) self.assertIn("""sentencepiece_and_tokenizers""" , A_) # Likewise, we can't assert on the exact content of a key self.assertIn("""BertModel""" , objects["""torch"""]) self.assertIn("""TFBertModel""" , objects["""tf"""]) self.assertIn("""FlaxBertModel""" , objects["""flax"""]) self.assertIn("""BertModel""" , objects["""torch"""]) self.assertIn("""TFBertTokenizer""" , objects["""tensorflow_text"""]) self.assertIn("""convert_slow_tokenizer""" , objects["""sentencepiece_and_tokenizers"""]) def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = create_dummy_object("""CONSTANT""" , """\'torch\'""") self.assertEqual(A_ , """\nCONSTANT = None\n""") _SCREAMING_SNAKE_CASE : List[str] = create_dummy_object("""function""" , """\'torch\'""") self.assertEqual( A_ , """\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n""") _SCREAMING_SNAKE_CASE : Optional[int] = """ class FakeClass(metaclass=DummyObject): _backends = \'torch\' def __init__(self, *args, **kwargs): requires_backends(self, \'torch\') """ _SCREAMING_SNAKE_CASE : Union[str, Any] = create_dummy_object("""FakeClass""" , """\'torch\'""") self.assertEqual(A_ , A_) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = """# This file is autogenerated by the command `make fix-copies`, do 
not edit. from ..utils import DummyObject, requires_backends CONSTANT = None def function(*args, **kwargs): requires_backends(function, [\"torch\"]) class FakeClass(metaclass=DummyObject): _backends = [\"torch\"] def __init__(self, *args, **kwargs): requires_backends(self, [\"torch\"]) """ _SCREAMING_SNAKE_CASE : Union[str, Any] = create_dummy_files({"""torch""": ["""CONSTANT""", """function""", """FakeClass"""]}) self.assertEqual(dummy_files["""torch"""] , A_)
code_codestyle: 705
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : int = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : List[Any] = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str: if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = parquet_path elif 
issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] _SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=("train",) )-> Union[str, Any]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for split in splits: _SCREAMING_SNAKE_CASE : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: _SCREAMING_SNAKE_CASE : Optional[int] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[str] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : int = ParquetDatasetReader({"""train""": parquet_path} , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: if split: _SCREAMING_SNAKE_CASE : Union[str, Any] = {split: parquet_path} else: _SCREAMING_SNAKE_CASE : Optional[int] = """train""" _SCREAMING_SNAKE_CASE : Any = {"""train""": parquet_path, """test""": parquet_path} _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , 
cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / """foo.parquet""" ) _SCREAMING_SNAKE_CASE : str = pf.read() assert dataset.data.table == output_table def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Dict = str(shared_datadir / """test_image_rgb.jpg""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = {"""image""": [image_path]} _SCREAMING_SNAKE_CASE : Optional[Any] = Features({"""image""": Image()} ) _SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__SCREAMING_SNAKE_CASE ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: assert get_writer_batch_size(__SCREAMING_SNAKE_CASE ) == expected
style_context_codestyle: 635
label: 0
"""simple docstring""" import argparse import os from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_task_guides.py lowerCAmelCase_ = 'src/transformers' lowerCAmelCase_ = 'docs/source/en/tasks' def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: with open(_lowerCamelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _SCREAMING_SNAKE_CASE : Dict = f.readlines() # Find the start prompt. _SCREAMING_SNAKE_CASE : str = 0 while not lines[start_index].startswith(_lowerCamelCase ): start_index += 1 start_index += 1 _SCREAMING_SNAKE_CASE : List[str] = start_index while not lines[end_index].startswith(_lowerCamelCase ): end_index += 1 end_index -= 1 while len(lines[start_index] ) <= 1: start_index += 1 while len(lines[end_index] ) <= 1: end_index -= 1 end_index += 1 return "".join(lines[start_index:end_index] ), start_index, end_index, lines # This is to make sure the transformers module imported is the one in the repo. lowerCAmelCase_ = direct_transformers_import(TRANSFORMERS_PATH) lowerCAmelCase_ = { 'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES, 'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES, 'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES, 'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES, 'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES, 'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES, 'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES, 'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES, 'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES, 'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES, 'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES, 'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES, 'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES, 'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES, 'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES, } # This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any # `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`). 
lowerCAmelCase_ = { 'summarization.md': ('nllb',), 'translation.md': ('nllb',), } def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : Optional[int] = TASK_GUIDE_TO_MODELS[task_guide] _SCREAMING_SNAKE_CASE : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_lowerCamelCase , set() ) _SCREAMING_SNAKE_CASE : Optional[int] = { code: name for code, name in transformers_module.MODEL_NAMES_MAPPING.items() if (code in model_maping_names or code in special_model_types) } return ", ".join([F"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n" def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> Any: _SCREAMING_SNAKE_CASE : List[str] = _find_text_in_file( filename=os.path.join(_lowerCamelCase , _lowerCamelCase ) , start_prompt="""<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->""" , end_prompt="""<!--End of the generated tip-->""" , ) _SCREAMING_SNAKE_CASE : str = get_model_list_for_task(_lowerCamelCase ) if current_list != new_list: if overwrite: with open(os.path.join(_lowerCamelCase , _lowerCamelCase ) , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(lines[:start_index] + [new_list] + lines[end_index:] ) else: raise ValueError( F"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`""" """ to fix this.""" ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''') lowerCAmelCase_ = parser.parse_args() for task_guide in TASK_GUIDE_TO_MODELS.keys(): check_model_list_for_task(task_guide, args.fix_and_overwrite)
code_codestyle: 706
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""only integers accepted as input""" ) else: _SCREAMING_SNAKE_CASE : List[Any] = str(abs(__SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : List[str] = [list(__SCREAMING_SNAKE_CASE ) for char in range(len(__SCREAMING_SNAKE_CASE ) )] for index in range(len(__SCREAMING_SNAKE_CASE ) ): num_transpositions[index].pop(__SCREAMING_SNAKE_CASE ) return max( int("""""".join(list(__SCREAMING_SNAKE_CASE ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('''doctest''').testmod()
style_context_codestyle: 635
label: 0
"""simple docstring""" import argparse from typing import Dict import tensorflow as tf import torch from tqdm import tqdm from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration lowerCAmelCase_ = [ # tf -> hf ("/", "."), ("layer_", "layers."), ("kernel", "weight"), ("beta", "bias"), ("gamma", "weight"), ("pegasus", "model"), ] lowerCAmelCase_ = [ (".output.dense", ".fc2"), ("intermediate.LayerNorm", "final_layer_norm"), ("intermediate.dense", "fc1"), ] lowerCAmelCase_ = ( INIT_COMMON + [ ("attention.self.LayerNorm", "self_attn_layer_norm"), ("attention.output.dense", "self_attn.out_proj"), ("attention.self", "self_attn"), ("attention.encdec.LayerNorm", "encoder_attn_layer_norm"), ("attention.encdec_output.dense", "encoder_attn.out_proj"), ("attention.encdec", "encoder_attn"), ("key", "k_proj"), ("value", "v_proj"), ("query", "q_proj"), ("decoder.LayerNorm", "decoder.layernorm_embedding"), ] + END_COMMON ) lowerCAmelCase_ = ( INIT_COMMON + [ ("embeddings.word_embeddings", "shared.weight"), ("embeddings.position_embeddings", "embed_positions.weight"), ("attention.self.LayerNorm", "self_attn_layer_norm"), ("attention.output.dense", "self_attn.output"), ("attention.self", "self_attn.self"), ("encoder.LayerNorm", "encoder.layernorm_embedding"), ] + END_COMMON ) lowerCAmelCase_ = [ "encdec/key/bias", "encdec/query/bias", "encdec/value/bias", "self/key/bias", "self/query/bias", "self/value/bias", "encdec_output/dense/bias", "attention/output/dense/bias", ] def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: for tf_name, hf_name in patterns: _SCREAMING_SNAKE_CASE : Any = k.replace(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) return k def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : str = BigBirdPegasusConfig(**_SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = BigBirdPegasusForConditionalGeneration(_SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = torch_model.state_dict() _SCREAMING_SNAKE_CASE : int = {} # separating decoder weights _SCREAMING_SNAKE_CASE : Tuple = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )} _SCREAMING_SNAKE_CASE : Optional[int] = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )} for k, v in tqdm(decoder_weights.items() , """tf -> hf conversion""" ): _SCREAMING_SNAKE_CASE : Tuple = [k.endswith(_SCREAMING_SNAKE_CASE ) for ending in KEYS_TO_IGNORE] if any(_SCREAMING_SNAKE_CASE ): continue _SCREAMING_SNAKE_CASE : Union[str, Any] = DECODER_PATTERNS _SCREAMING_SNAKE_CASE : List[Any] = rename_state_dict_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_k not in state_dict: raise ValueError(F"""could not find new key {new_k} in state dict. 
(converted from {k})""" ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): _SCREAMING_SNAKE_CASE : str = v.T _SCREAMING_SNAKE_CASE : Optional[Any] = torch.from_numpy(_SCREAMING_SNAKE_CASE ) assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" for k, v in tqdm(remaining_weights.items() , """tf -> hf conversion""" ): _SCREAMING_SNAKE_CASE : Optional[Any] = [k.endswith(_SCREAMING_SNAKE_CASE ) for ending in KEYS_TO_IGNORE] if any(_SCREAMING_SNAKE_CASE ): continue _SCREAMING_SNAKE_CASE : int = REMAINING_PATTERNS _SCREAMING_SNAKE_CASE : int = rename_state_dict_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings": raise ValueError(F"""could not find new key {new_k} in state dict. (converted from {k})""" ) if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ): _SCREAMING_SNAKE_CASE : Any = v.T _SCREAMING_SNAKE_CASE : Tuple = torch.from_numpy(_SCREAMING_SNAKE_CASE ) if k != "pegasus/embeddings/position_embeddings": assert v.shape == state_dict[new_k].shape, F"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}""" _SCREAMING_SNAKE_CASE : str = mapping["""model.embed_positions.weight"""] _SCREAMING_SNAKE_CASE : Tuple = mapping.pop("""model.embed_positions.weight""" ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = torch_model.load_state_dict(_SCREAMING_SNAKE_CASE , strict=_SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = [ k for k in missing if k not in [ """final_logits_bias""", """model.encoder.embed_tokens.weight""", """model.decoder.embed_tokens.weight""", """lm_head.weight""", ] ] assert unexpected_missing == [], F"""no matches found for the following torch keys {unexpected_missing}""" assert extra == [], F"""no matches found for the following tf keys {extra}""" return torch_model def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : List[Any] = tf.train.list_variables(_SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = {} _SCREAMING_SNAKE_CASE : int = ["""global_step"""] for name, shape in tqdm(_SCREAMING_SNAKE_CASE , desc="""converting tf checkpoint to dict""" ): _SCREAMING_SNAKE_CASE : Any = any(pat in name for pat in ignore_name ) if skip_key: continue _SCREAMING_SNAKE_CASE : Union[str, Any] = tf.train.load_variable(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = array return tf_weights def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : List[Any] = get_tf_weights_as_numpy(_SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = convert_bigbird_pegasus(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) torch_model.save_pretrained(_SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--tf_ckpt_path''', type=str, help='''passed to tf.train.list_variables''') parser.add_argument('''--save_dir''', default=None, type=str, help='''Path to the output PyTorch model.''') lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = {} convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
code_codestyle: 707
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Dict = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : str = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : List[Any] = -1 _SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0]) _SCREAMING_SNAKE_CASE : List[Any] = TextIteratorStreamer(_A) _SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[Any] = Thread(target=model.generate , kwargs=_A) thread.start() _SCREAMING_SNAKE_CASE : Any = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(_A , _A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : str = greedy_ids[:, input_ids.shape[1] :] _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(new_greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A , skip_prompt=_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : Optional[int] = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""distilgpt2""") _SCREAMING_SNAKE_CASE : Optional[Any] = 
AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(_A) _SCREAMING_SNAKE_CASE : int = -1 _SCREAMING_SNAKE_CASE : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Optional[int] = TextStreamer(_A , skip_special_tokens=_A) model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _SCREAMING_SNAKE_CASE : Optional[Any] = cs.out[:-1] # Remove the final "\n" _SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""pt""") self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1)) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Tuple = -1 _SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(_A , timeout=0.001) _SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[str] = Thread(target=model.generate , kwargs=_A) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_A): _SCREAMING_SNAKE_CASE : str = """""" for new_text in streamer: streamer_text += new_text
style_context_codestyle: 635
label: 0
"""simple docstring""" lowerCAmelCase_ = 8.3_14_45_98 def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> float: if temperature < 0: raise Exception("""Temperature cannot be less than 0 K""" ) if molar_mass <= 0: raise Exception("""Molar mass cannot be less than or equal to 0 kg/mol""" ) else: return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5 if __name__ == "__main__": import doctest # run doctest doctest.testmod() # example lowerCAmelCase_ = 300 lowerCAmelCase_ = 28 lowerCAmelCase_ = rms_speed_of_molecule(temperature, molar_mass) print(F"Vrms of Nitrogen gas at 300 K is {vrms} m/s")
code_codestyle: 708
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class _snake_case ( __snake_case ): """simple docstring""" a = "facebook/bart-large-mnli" a = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) a = "text_classifier" a = AutoTokenizer a = AutoModelForSequenceClassification a = ["text", ["text"]] a = ["text"] def _lowerCAmelCase ( self : int): """simple docstring""" super().setup() _SCREAMING_SNAKE_CASE : Any = self.model.config _SCREAMING_SNAKE_CASE : Any = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("""entail"""): _SCREAMING_SNAKE_CASE : List[Any] = int(_A) if self.entailment_id == -1: raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""") def _lowerCAmelCase ( self : Optional[Any] , _A : Tuple , _A : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = labels return self.pre_processor( [text] * len(_A) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , ) def _lowerCAmelCase ( self : Tuple , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = outputs.logits _SCREAMING_SNAKE_CASE : List[Any] = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
style_context_codestyle: 635
label: 0
"""simple docstring""" from collections import OrderedDict from typing import TYPE_CHECKING, Any, Mapping, Optional from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging if TYPE_CHECKING: from ... import FeatureExtractionMixin, TensorType lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''openai/imagegpt-small''': '''''', '''openai/imagegpt-medium''': '''''', '''openai/imagegpt-large''': '''''', } class _snake_case ( __A ): """simple docstring""" a = """imagegpt""" a = ["""past_key_values"""] a = { """hidden_size""": """n_embd""", """max_position_embeddings""": """n_positions""", """num_attention_heads""": """n_head""", """num_hidden_layers""": """n_layer""", } def __init__( self : Any , _A : int=5_1_2 + 1 , _A : Any=3_2 * 3_2 , _A : str=5_1_2 , _A : Dict=2_4 , _A : List[str]=8 , _A : Optional[int]=None , _A : Any="quick_gelu" , _A : List[str]=0.1 , _A : Tuple=0.1 , _A : List[str]=0.1 , _A : Dict=1e-5 , _A : Dict=0.02 , _A : str=True , _A : str=True , _A : int=False , _A : Optional[int]=False , _A : Optional[Any]=False , **_A : Union[str, Any] , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = vocab_size _SCREAMING_SNAKE_CASE : str = n_positions _SCREAMING_SNAKE_CASE : Dict = n_embd _SCREAMING_SNAKE_CASE : Tuple = n_layer _SCREAMING_SNAKE_CASE : List[str] = n_head _SCREAMING_SNAKE_CASE : List[Any] = n_inner _SCREAMING_SNAKE_CASE : Union[str, Any] = activation_function _SCREAMING_SNAKE_CASE : List[str] = resid_pdrop _SCREAMING_SNAKE_CASE : Optional[Any] = embd_pdrop _SCREAMING_SNAKE_CASE : Any = attn_pdrop _SCREAMING_SNAKE_CASE : Tuple = layer_norm_epsilon _SCREAMING_SNAKE_CASE : Optional[Any] = initializer_range _SCREAMING_SNAKE_CASE : Union[str, Any] = scale_attn_weights _SCREAMING_SNAKE_CASE : Dict = use_cache _SCREAMING_SNAKE_CASE : Dict = scale_attn_by_inverse_layer_idx _SCREAMING_SNAKE_CASE : Union[str, Any] = reorder_and_upcast_attn _SCREAMING_SNAKE_CASE : List[Any] = tie_word_embeddings super().__init__(tie_word_embeddings=UpperCamelCase__ , **UpperCamelCase__) class _snake_case ( __A ): """simple docstring""" @property def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" return OrderedDict( [ ("""input_ids""", {0: """batch""", 1: """sequence"""}), ]) def _lowerCAmelCase ( self : Dict , _A : Optional[int] , _A : Optional[Any] = 1 , _A : int = -1 , _A : Optional[int] = False , _A : str = None , _A : Dict = 3 , _A : List[Any] = 3_2 , _A : Tuple = 3_2 , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self._generate_dummy_images(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__) _SCREAMING_SNAKE_CASE : List[Any] = dict(preprocessor(images=UpperCamelCase__ , return_tensors=UpperCamelCase__)) return inputs
code_codestyle: 709
"""simple docstring""" import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") _SCREAMING_SNAKE_CASE : str = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") model.to(_A) from datasets import load_dataset _SCREAMING_SNAKE_CASE : Any = load_dataset("""nielsr/rvlcdip-demo""") _SCREAMING_SNAKE_CASE : Any = dataset["""train"""][0]["""image"""].convert("""RGB""") _SCREAMING_SNAKE_CASE : str = image_processor(_A , return_tensors="""pt""").to(_A) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE : Any = model(**_A) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 1_6)) self.assertEqual(logits.shape , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [-0.4_158, -0.4_092, -0.4_347] , device=_A , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , _A , atol=1e-4))
style_context_codestyle: 635
label: 0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCAmelCase_ = { '''configuration_mask2former''': [ '''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Mask2FormerConfig''', ], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''Mask2FormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''Mask2FormerForUniversalSegmentation''', '''Mask2FormerModel''', '''Mask2FormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .image_processing_maskaformer import MaskaFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskaformer import ( MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskaFormerForUniversalSegmentation, MaskaFormerModel, MaskaFormerPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
code_codestyle: 710
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class _snake_case ( __snake_case ): """simple docstring""" a = "M-CLIP" def __init__( self : Optional[Any] , _A : List[str]=1_0_2_4 , _A : Union[str, Any]=7_6_8 , **_A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = transformerDimSize _SCREAMING_SNAKE_CASE : List[str] = imageDimSize super().__init__(**_A) class _snake_case ( __snake_case ): """simple docstring""" a = MCLIPConfig def __init__( self : Dict , _A : Optional[Any] , *_A : Any , **_A : Dict): """simple docstring""" super().__init__(_A , *_A , **_A) _SCREAMING_SNAKE_CASE : Tuple = XLMRobertaModel(_A) _SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims) def _lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.transformer(input_ids=_A , attention_mask=_A)[0] _SCREAMING_SNAKE_CASE : Optional[Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None] return self.LinearTransformation(_A), embs
style_context_codestyle: 635
label: 0
import math
import os
from copy import deepcopy

import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer

from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed

lowerCAmelCase_ = "true"


def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader


def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)


def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator


def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
            logit, target = accelerator.gather_for_metrics((logit, target))
            logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs


def test_torch_metrics(
    accelerator: Accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16
):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"


def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"


def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
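As a minimal sketch of the pattern the script above exercises (hedged: evaluate_loop and its argument names are illustrative, not from the original file): with several processes, a prepared dataloader pads the final batch so every rank gets the same number of samples, and gather_for_metrics both collects tensors across ranks and drops the padded duplicates, so the metric sees exactly len(dataset) examples:

import torch

from accelerate import Accelerator


def evaluate_loop(model, dataloader, metric, accelerator: Accelerator):
    # `model` and `dataloader` are assumed to have gone through `accelerator.prepare(...)`.
    model.eval()
    for batch in dataloader:
        with torch.no_grad():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        # Gather across processes and strip samples duplicated to pad the last batch.
        preds, refs = accelerator.gather_for_metrics((preds, batch["labels"]))
        metric.add_batch(predictions=preds, references=refs)
    return metric.compute()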
711
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) _SCREAMING_SNAKE_CASE : int = precision _SCREAMING_SNAKE_CASE : Dict = ceil(precision / 14 ) _SCREAMING_SNAKE_CASE : int = 426_880 * Decimal(10_005 ).sqrt() _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 _SCREAMING_SNAKE_CASE : str = 13_591_409 _SCREAMING_SNAKE_CASE : Tuple = Decimal(__SCREAMING_SNAKE_CASE ) for k in range(1 , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = factorial(6 * k ) // (factorial(3 * k ) * factorial(__SCREAMING_SNAKE_CASE ) ** 3) linear_term += 545_140_134 exponential_term *= -262_537_412_640_768_000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": lowerCAmelCase_ = 50 print(F"The first {n} digits of pi is: {pi(n)}")
635
0
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = " " )-> list: _SCREAMING_SNAKE_CASE : Any = [] _SCREAMING_SNAKE_CASE : int = 0 for index, char in enumerate(__lowerCAmelCase ): if char == separator: split_words.append(string[last_index:index] ) _SCREAMING_SNAKE_CASE : List[str] = index + 1 elif index + 1 == len(__lowerCAmelCase ): split_words.append(string[last_index : index + 1] ) return split_words if __name__ == "__main__": from doctest import testmod testmod()
712
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _SCREAMING_SNAKE_CASE : Optional[int] = TapasConfig.from_json_file(__SCREAMING_SNAKE_CASE ) # set absolute/relative position embeddings parameter _SCREAMING_SNAKE_CASE : Dict = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _SCREAMING_SNAKE_CASE : str = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WTQ": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : Optional[int] = 4 _SCREAMING_SNAKE_CASE : Any = True # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 0.66_46_94 _SCREAMING_SNAKE_CASE : str = 0.20_79_51 _SCREAMING_SNAKE_CASE : str = 0.12_11_94 _SCREAMING_SNAKE_CASE : List[Any] = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = 0.0_35_25_13 _SCREAMING_SNAKE_CASE : Optional[Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : int = 4 _SCREAMING_SNAKE_CASE : Tuple = False # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 36.45_19 _SCREAMING_SNAKE_CASE : Union[str, Any] = 0.90_34_21 _SCREAMING_SNAKE_CASE : Optional[Any] = 2_22.0_88 _SCREAMING_SNAKE_CASE : Any = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Optional[int] = True _SCREAMING_SNAKE_CASE : Dict = 0.76_31_41 _SCREAMING_SNAKE_CASE : Union[str, Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "TABFACT": _SCREAMING_SNAKE_CASE : int = TapasForSequenceClassification(config=__SCREAMING_SNAKE_CASE ) elif task == "MLM": _SCREAMING_SNAKE_CASE : int = TapasForMaskedLM(config=__SCREAMING_SNAKE_CASE ) elif task == "INTERMEDIATE_PRETRAINING": _SCREAMING_SNAKE_CASE : int = TapasModel(config=__SCREAMING_SNAKE_CASE ) else: raise ValueError(F"""Task {task} not supported.""" ) print(F"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save pytorch-model (weights and configuration) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Save tokenizer files print(F"""Save tokenizer files to {pytorch_dump_path}""" ) _SCREAMING_SNAKE_CASE : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 ) tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. 
Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. Defaults to True.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
635
0
import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
713
"""simple docstring""" from typing import Any import numpy as np def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: return np.array_equal(__SCREAMING_SNAKE_CASE , matrix.conjugate().T ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: _SCREAMING_SNAKE_CASE : Optional[int] = v.conjugate().T _SCREAMING_SNAKE_CASE : Optional[int] = v_star.dot(__SCREAMING_SNAKE_CASE ) assert isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) return (v_star_dot.dot(__SCREAMING_SNAKE_CASE )) / (v_star.dot(__SCREAMING_SNAKE_CASE )) def lowerCamelCase_()-> None: _SCREAMING_SNAKE_CASE : Optional[Any] = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) _SCREAMING_SNAKE_CASE : int = np.array([[1], [2], [3]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" print(rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : int = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" assert rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
635
0
"""simple docstring""" import unittest from transformers import AlbertConfig, is_torch_available from transformers.models.auto import get_values from transformers.testing_utils import require_torch, slow, torch_device from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, ) from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST class _snake_case : """simple docstring""" def __init__( self : Any , _A : Tuple , _A : Optional[int]=1_3 , _A : Tuple=7 , _A : int=True , _A : Optional[int]=True , _A : int=True , _A : Optional[int]=True , _A : Dict=9_9 , _A : Any=1_6 , _A : Any=3_6 , _A : List[Any]=6 , _A : Optional[int]=6 , _A : int=6 , _A : Dict=3_7 , _A : List[Any]="gelu" , _A : Optional[int]=0.1 , _A : str=0.1 , _A : Any=5_1_2 , _A : Optional[Any]=1_6 , _A : List[str]=2 , _A : Tuple=0.02 , _A : List[Any]=3 , _A : Any=4 , _A : List[str]=None , ): """simple docstring""" _SCREAMING_SNAKE_CASE = parent _SCREAMING_SNAKE_CASE = batch_size _SCREAMING_SNAKE_CASE = seq_length _SCREAMING_SNAKE_CASE = is_training _SCREAMING_SNAKE_CASE = use_input_mask _SCREAMING_SNAKE_CASE = use_token_type_ids _SCREAMING_SNAKE_CASE = use_labels _SCREAMING_SNAKE_CASE = vocab_size _SCREAMING_SNAKE_CASE = embedding_size _SCREAMING_SNAKE_CASE = hidden_size _SCREAMING_SNAKE_CASE = num_hidden_layers _SCREAMING_SNAKE_CASE = num_hidden_groups _SCREAMING_SNAKE_CASE = num_attention_heads _SCREAMING_SNAKE_CASE = intermediate_size _SCREAMING_SNAKE_CASE = hidden_act _SCREAMING_SNAKE_CASE = hidden_dropout_prob _SCREAMING_SNAKE_CASE = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE = max_position_embeddings _SCREAMING_SNAKE_CASE = type_vocab_size _SCREAMING_SNAKE_CASE = type_sequence_label_size _SCREAMING_SNAKE_CASE = initializer_range _SCREAMING_SNAKE_CASE = num_labels _SCREAMING_SNAKE_CASE = num_choices _SCREAMING_SNAKE_CASE = scope def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size) _SCREAMING_SNAKE_CASE = None if self.use_input_mask: _SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length]) _SCREAMING_SNAKE_CASE = None if self.use_token_type_ids: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size) _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None _SCREAMING_SNAKE_CASE = None if self.use_labels: _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] , self.num_labels) _SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.num_choices) _SCREAMING_SNAKE_CASE = self.get_config() return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def _lowerCAmelCase ( self : Dict): """simple docstring""" return AlbertConfig( vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob 
, attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , ) def _lowerCAmelCase ( self : Any , _A : str , _A : str , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Dict , _A : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE = AlbertModel(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , token_type_ids=_A) _SCREAMING_SNAKE_CASE = model(_A , token_type_ids=_A) _SCREAMING_SNAKE_CASE = model(_A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size)) def _lowerCAmelCase ( self : Dict , _A : Any , _A : Optional[Any] , _A : str , _A : str , _A : Optional[Any] , _A : int , _A : str): """simple docstring""" _SCREAMING_SNAKE_CASE = AlbertForPreTraining(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , sentence_order_label=_A , ) self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels)) def _lowerCAmelCase ( self : Dict , _A : int , _A : str , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : str , _A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE = AlbertForMaskedLM(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size)) def _lowerCAmelCase ( self : Tuple , _A : Optional[Any] , _A : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE = AlbertForQuestionAnswering(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE = model( _A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , ) self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length)) def _lowerCAmelCase ( self : Union[str, Any] , _A : Tuple , _A : List[str] , _A : List[str] , _A : Optional[int] , _A : List[Any] , _A : Tuple , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = AlbertForSequenceClassification(_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels)) def _lowerCAmelCase ( self : Tuple , _A : List[Any] , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[int] , _A : str , _A : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE = self.num_labels _SCREAMING_SNAKE_CASE = AlbertForTokenClassification(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels)) def _lowerCAmelCase ( self : Union[str, Any] , _A : int , _A : Optional[Any] , _A : List[str] , _A : List[Any] , _A : List[str] , _A : List[str] , _A : str): """simple docstring""" _SCREAMING_SNAKE_CASE = self.num_choices 
_SCREAMING_SNAKE_CASE = AlbertForMultipleChoice(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE = input_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _SCREAMING_SNAKE_CASE = token_type_ids.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _SCREAMING_SNAKE_CASE = input_mask.unsqueeze(1).expand(-1 , self.num_choices , -1).contiguous() _SCREAMING_SNAKE_CASE = model( _A , attention_mask=_A , token_type_ids=_A , labels=_A , ) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices)) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs() ( ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ( _SCREAMING_SNAKE_CASE ) , ) = config_and_inputs _SCREAMING_SNAKE_CASE = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask} return config, inputs_dict @require_torch class _snake_case ( _a , _a , unittest.TestCase ): """simple docstring""" a = ( ( AlbertModel, AlbertForPreTraining, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertForQuestionAnswering, ) if is_torch_available() else () ) a = ( { "feature-extraction": AlbertModel, "fill-mask": AlbertForMaskedLM, "question-answering": AlbertForQuestionAnswering, "text-classification": AlbertForSequenceClassification, "token-classification": AlbertForTokenClassification, "zero-shot": AlbertForSequenceClassification, } if is_torch_available() else {} ) a = True def _lowerCAmelCase ( self : Optional[int] , _A : int , _A : str , _A : str=False): """simple docstring""" _SCREAMING_SNAKE_CASE = super()._prepare_for_class(_A , _A , return_labels=_A) if return_labels: if model_class in get_values(_A): _SCREAMING_SNAKE_CASE = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_A) _SCREAMING_SNAKE_CASE = torch.zeros( self.model_tester.batch_size , dtype=torch.long , device=_A) return inputs_dict def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE = AlbertModelTester(self) _SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=_A , hidden_size=3_7) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" self.config_tester.run_common_tests() def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_pretraining(*_A) def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*_A) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*_A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*_A) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() 
self.model_tester.create_and_check_for_sequence_classification(*_A) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: _SCREAMING_SNAKE_CASE = type self.model_tester.create_and_check_model(*_A) @slow def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE = AlbertModel.from_pretrained(_A) self.assertIsNotNone(_A) @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE = AlbertModel.from_pretrained("""albert-base-v2""") _SCREAMING_SNAKE_CASE = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]]) _SCREAMING_SNAKE_CASE = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]) with torch.no_grad(): _SCREAMING_SNAKE_CASE = model(_A , attention_mask=_A)[0] _SCREAMING_SNAKE_CASE = torch.Size((1, 1_1, 7_6_8)) self.assertEqual(output.shape , _A) _SCREAMING_SNAKE_CASE = torch.tensor( [[[-0.6_513, 1.5_035, -0.2_766], [-0.6_515, 1.5_046, -0.2_780], [-0.6_512, 1.5_049, -0.2_784]]]) self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1e-4))
714
"""simple docstring""" from __future__ import annotations def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> tuple: if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative in a semiconductor""" ) elif hole_conc < 0: raise ValueError("""Hole concentration cannot be negative in a semiconductor""" ) elif intrinsic_conc < 0: raise ValueError( """Intrinsic concentration cannot be negative in a semiconductor""" ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
635
0
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: return " ".join( """""".join(word[::-1] ) if len(UpperCAmelCase__ ) > 4 else word for word in sentence.split() ) if __name__ == "__main__": import doctest doctest.testmod() print(reverse_long_words('''Hey wollef sroirraw'''))
715
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 )-> str: _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = DatasetDict( { """train""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """validation""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """test""": dataset["""validation"""], } ) def tokenize_function(__SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _SCREAMING_SNAKE_CASE : str = datasets.map( __SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _SCREAMING_SNAKE_CASE : Any = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. 
_SCREAMING_SNAKE_CASE : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _SCREAMING_SNAKE_CASE : Optional[Any] = 16 elif accelerator.mixed_precision != "no": _SCREAMING_SNAKE_CASE : Any = 8 else: _SCREAMING_SNAKE_CASE : Optional[int] = None return tokenizer.pad( __SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , ) # Instantiate dataloaders. _SCREAMING_SNAKE_CASE : int = DataLoader( tokenized_datasets["""train"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["""test"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader, test_dataloader def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: # New Code # _SCREAMING_SNAKE_CASE : Union[str, Any] = [] # Download the dataset _SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("""glue""" , """mrpc""" ) # Create our splits _SCREAMING_SNAKE_CASE : Dict = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator _SCREAMING_SNAKE_CASE : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _SCREAMING_SNAKE_CASE : Tuple = config["""lr"""] _SCREAMING_SNAKE_CASE : Tuple = int(config["""num_epochs"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""seed"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""batch_size"""] ) _SCREAMING_SNAKE_CASE : List[str] = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation _SCREAMING_SNAKE_CASE : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _SCREAMING_SNAKE_CASE : List[str] = batch_size // MAX_GPU_BATCH_SIZE _SCREAMING_SNAKE_CASE : List[str] = MAX_GPU_BATCH_SIZE set_seed(__SCREAMING_SNAKE_CASE ) # New Code # # Create our folds: _SCREAMING_SNAKE_CASE : List[str] = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] ) _SCREAMING_SNAKE_CASE : Optional[Any] = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = get_fold_dataloaders( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
_SCREAMING_SNAKE_CASE : Tuple = model.to(accelerator.device ) # Instantiate optimizer _SCREAMING_SNAKE_CASE : int = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE ) # Instantiate scheduler _SCREAMING_SNAKE_CASE : int = get_linear_schedule_with_warmup( optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(__SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = outputs.loss _SCREAMING_SNAKE_CASE : List[Any] = loss / gradient_accumulation_steps accelerator.backward(__SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , ) _SCREAMING_SNAKE_CASE : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , __SCREAMING_SNAKE_CASE ) # New Code # # We also run predictions on the test set at the very end _SCREAMING_SNAKE_CASE : str = [] for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. 
test_predictions.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: _SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) _SCREAMING_SNAKE_CASE : List[str] = torch.stack(__SCREAMING_SNAKE_CASE , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) _SCREAMING_SNAKE_CASE : int = metric.compute(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE ) accelerator.print("""Average test metrics from all folds:""" , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_()-> Optional[Any]: _SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) # New Code # parser.add_argument("""--num_folds""" , type=__SCREAMING_SNAKE_CASE , default=3 , help="""The number of splits to perform across the dataset""" ) _SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() _SCREAMING_SNAKE_CASE : Optional[int] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
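The fold-ensembling step at the end of the training function is plain soft voting: the per-fold test logits are stacked, summed, divided by the number of folds, and the class with the highest averaged logit wins. With $z^{(k)}_c$ the logit for class $c$ from fold $k$ of $K$:

$$\hat{y} = \arg\max_c \; \frac{1}{K} \sum_{k=1}^{K} z^{(k)}_c$$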
635
0
"""simple docstring""" from __future__ import annotations from random import random from typing import Generic, TypeVar lowerCAmelCase_ = TypeVar('''KT''') lowerCAmelCase_ = TypeVar('''VT''') class _snake_case ( Generic[KT, VT] ): """simple docstring""" def __init__( self : List[str] , _A : str = "root" , _A : int = None): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = key _SCREAMING_SNAKE_CASE : int = value _SCREAMING_SNAKE_CASE : int = [] def __repr__( self : Dict): """simple docstring""" return f"""Node({self.key}: {self.value})""" @property def _lowerCAmelCase ( self : int): """simple docstring""" return len(self.forward) class _snake_case ( Generic[KT, VT] ): """simple docstring""" def __init__( self : List[Any] , _A : List[str] = 0.5 , _A : Any = 1_6): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = Node[KT, VT]() _SCREAMING_SNAKE_CASE : List[Any] = 0 _SCREAMING_SNAKE_CASE : Dict = p _SCREAMING_SNAKE_CASE : List[str] = max_level def __str__( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = list(self) if len(_A) == 0: return f"""SkipList(level={self.level})""" _SCREAMING_SNAKE_CASE : str = max((len(str(_A)) for item in items) , default=4) _SCREAMING_SNAKE_CASE : Optional[Any] = max(_A , 4) + 4 _SCREAMING_SNAKE_CASE : List[str] = self.head _SCREAMING_SNAKE_CASE : str = [] _SCREAMING_SNAKE_CASE : List[str] = node.forward.copy() lines.append(f"""[{node.key}]""".ljust(_A , """-""") + """* """ * len(_A)) lines.append(""" """ * label_size + """| """ * len(_A)) while len(node.forward) != 0: _SCREAMING_SNAKE_CASE : Tuple = node.forward[0] lines.append( f"""[{node.key}]""".ljust(_A , """-""") + """ """.join(str(n.key) if n.key == node.key else """|""" for n in forwards)) lines.append(""" """ * label_size + """| """ * len(_A)) _SCREAMING_SNAKE_CASE : Optional[Any] = node.forward lines.append("""None""".ljust(_A) + """* """ * len(_A)) return f"""SkipList(level={self.level})\n""" + "\n".join(_A) def __iter__( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = self.head while len(node.forward) != 0: yield node.forward[0].key _SCREAMING_SNAKE_CASE : str = node.forward[0] def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = 1 while random() < self.p and level < self.max_level: level += 1 return level def _lowerCAmelCase ( self : Union[str, Any] , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : int = [] _SCREAMING_SNAKE_CASE : Union[str, Any] = self.head for i in reversed(range(self.level)): # i < node.level - When node level is lesser than `i` decrement `i`. # node.forward[i].key < key - Jumping to node with key value higher # or equal to searched key would result # in skipping searched key. while i < node.level and node.forward[i].key < key: _SCREAMING_SNAKE_CASE : Optional[int] = node.forward[i] # Each leftmost node (relative to searched node) will potentially have to # be updated. update_vector.append(_A) update_vector.reverse() # Note that we were inserting values in reverse order. # len(node.forward) != 0 - If current node doesn't contain any further # references then searched key is not present. # node.forward[0].key == key - Next node key should be equal to search key # if key is present. 
if len(node.forward) != 0 and node.forward[0].key == key: return node.forward[0], update_vector else: return None, update_vector def _lowerCAmelCase ( self : List[str] , _A : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Dict = self._locate_node(_A) if node is not None: for i, update_node in enumerate(_A): # Remove or replace all references to removed node. if update_node.level > i and update_node.forward[i].key == key: if node.level > i: _SCREAMING_SNAKE_CASE : Tuple = node.forward[i] else: _SCREAMING_SNAKE_CASE : str = update_node.forward[:i] def _lowerCAmelCase ( self : int , _A : Tuple , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self._locate_node(_A) if node is not None: _SCREAMING_SNAKE_CASE : Dict = value else: _SCREAMING_SNAKE_CASE : Optional[int] = self.random_level() if level > self.level: # After level increase we have to add additional nodes to head. for _ in range(self.level - 1 , _A): update_vector.append(self.head) _SCREAMING_SNAKE_CASE : str = level _SCREAMING_SNAKE_CASE : List[Any] = Node(_A , _A) for i, update_node in enumerate(update_vector[:level]): # Change references to pass through new node. if update_node.level > i: new_node.forward.append(update_node.forward[i]) if update_node.level < i + 1: update_node.forward.append(_A) else: _SCREAMING_SNAKE_CASE : Optional[Any] = new_node def _lowerCAmelCase ( self : Union[str, Any] , _A : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = self._locate_node(_A) if node is not None: return node.value return None def lowerCamelCase_( )-> Dict: _SCREAMING_SNAKE_CASE : Union[str, Any] = SkipList() skip_list.insert("""Key1""" , 3 ) skip_list.insert("""Key2""" , 12 ) skip_list.insert("""Key3""" , 41 ) skip_list.insert("""Key4""" , -19 ) _SCREAMING_SNAKE_CASE : Optional[Any] = skip_list.head _SCREAMING_SNAKE_CASE : List[Any] = {} while node.level != 0: _SCREAMING_SNAKE_CASE : Dict = node.forward[0] _SCREAMING_SNAKE_CASE : Optional[Any] = node.value assert len(_UpperCamelCase ) == 4 assert all_values["Key1"] == 3 assert all_values["Key2"] == 12 assert all_values["Key3"] == 41 assert all_values["Key4"] == -19 def lowerCamelCase_( )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Any = SkipList() skip_list.insert("""Key1""" , 10 ) skip_list.insert("""Key1""" , 12 ) skip_list.insert("""Key5""" , 7 ) skip_list.insert("""Key7""" , 10 ) skip_list.insert("""Key10""" , 5 ) skip_list.insert("""Key7""" , 7 ) skip_list.insert("""Key5""" , 5 ) skip_list.insert("""Key10""" , 10 ) _SCREAMING_SNAKE_CASE : Any = skip_list.head _SCREAMING_SNAKE_CASE : Dict = {} while node.level != 0: _SCREAMING_SNAKE_CASE : str = node.forward[0] _SCREAMING_SNAKE_CASE : List[str] = node.value if len(_UpperCamelCase ) != 4: print() assert len(_UpperCamelCase ) == 4 assert all_values["Key1"] == 12 assert all_values["Key7"] == 7 assert all_values["Key5"] == 5 assert all_values["Key10"] == 10 def lowerCamelCase_( )-> Tuple: _SCREAMING_SNAKE_CASE : Any = SkipList() assert skip_list.find("""Some key""" ) is None def lowerCamelCase_( )-> Optional[int]: _SCREAMING_SNAKE_CASE : str = SkipList() skip_list.insert("""Key2""" , 20 ) assert skip_list.find("""Key2""" ) == 20 skip_list.insert("""Some Key""" , 10 ) skip_list.insert("""Key2""" , 8 ) skip_list.insert("""V""" , 13 ) assert skip_list.find("""Y""" ) is None assert skip_list.find("""Key2""" ) == 8 assert skip_list.find("""Some Key""" ) == 10 assert skip_list.find("""V""" ) == 13 def lowerCamelCase_( 
)-> Optional[Any]: _SCREAMING_SNAKE_CASE : List[str] = SkipList() skip_list.delete("""Some key""" ) assert len(skip_list.head.forward ) == 0 def lowerCamelCase_( )-> int: _SCREAMING_SNAKE_CASE : Dict = SkipList() skip_list.insert("""Key1""" , 12 ) skip_list.insert("""V""" , 13 ) skip_list.insert("""X""" , 14 ) skip_list.insert("""Key2""" , 15 ) skip_list.delete("""V""" ) skip_list.delete("""Key2""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""Key2""" ) is None def lowerCamelCase_( )-> Any: _SCREAMING_SNAKE_CASE : Tuple = SkipList() skip_list.insert("""Key1""" , 12 ) skip_list.insert("""V""" , 13 ) skip_list.insert("""X""" , 14 ) skip_list.insert("""Key2""" , 15 ) skip_list.delete("""V""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) == 14 assert skip_list.find("""Key1""" ) == 12 assert skip_list.find("""Key2""" ) == 15 skip_list.delete("""X""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) == 12 assert skip_list.find("""Key2""" ) == 15 skip_list.delete("""Key1""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) is None assert skip_list.find("""Key2""" ) == 15 skip_list.delete("""Key2""" ) assert skip_list.find("""V""" ) is None assert skip_list.find("""X""" ) is None assert skip_list.find("""Key1""" ) is None assert skip_list.find("""Key2""" ) is None def lowerCamelCase_( )-> Tuple: _SCREAMING_SNAKE_CASE : List[str] = SkipList() skip_list.insert("""Key1""" , 12 ) skip_list.insert("""V""" , 13 ) skip_list.insert("""X""" , 142 ) skip_list.insert("""Key2""" , 15 ) skip_list.delete("""X""" ) def traverse_keys(__SCREAMING_SNAKE_CASE ): yield node.key for forward_node in node.forward: yield from traverse_keys(_UpperCamelCase ) assert len(set(traverse_keys(skip_list.head ) ) ) == 4 def lowerCamelCase_( )-> List[Any]: def is_sorted(__SCREAMING_SNAKE_CASE ): return all(next_item >= item for item, next_item in zip(_UpperCamelCase , lst[1:] ) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = SkipList() for i in range(10 ): skip_list.insert(_UpperCamelCase , _UpperCamelCase ) assert is_sorted(list(_UpperCamelCase ) ) skip_list.delete(5 ) skip_list.delete(8 ) skip_list.delete(2 ) assert is_sorted(list(_UpperCamelCase ) ) skip_list.insert(-12 , -12 ) skip_list.insert(77 , 77 ) assert is_sorted(list(_UpperCamelCase ) ) def lowerCamelCase_( )-> Dict: for _ in range(100 ): # Repeat test 100 times due to the probabilistic nature of skip list # random values == random bugs test_insert() test_insert_overrides_existing_value() test_searching_empty_list_returns_none() test_search() test_deleting_item_from_empty_list_do_nothing() test_deleted_items_are_not_founded_by_find_method() test_delete_removes_only_given_key() test_delete_doesnt_leave_dead_nodes() test_iter_always_yields_sorted_values() def lowerCamelCase_( )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = SkipList() skip_list.insert(2 , """2""" ) skip_list.insert(4 , """4""" ) skip_list.insert(6 , """4""" ) skip_list.insert(4 , """5""" ) skip_list.insert(8 , """4""" ) skip_list.insert(9 , """4""" ) skip_list.delete(4 ) print(_UpperCamelCase ) if __name__ == "__main__": import doctest doctest.testmod() main()
716
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
0
import os import time from dataclasses import dataclass, field from enum import Enum from typing import Dict, List, Optional, Union import torch from filelock import FileLock from torch.utils.data import Dataset from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys()) lowerCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES) @dataclass class _snake_case : """simple docstring""" a = field( default=__UpperCAmelCase , metadata={"help": "Model type selected in the list: " + ", ".join(__UpperCAmelCase )} ) a = field( default=__UpperCAmelCase , metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."} ) a = field( default=1_28 , metadata={ "help": ( "The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded." ) } , ) a = field( default=1_28 , metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."} , ) a = field( default=64 , metadata={ "help": ( "The maximum number of tokens for the question. Questions longer than this will " "be truncated to this length." ) } , ) a = field( default=30 , metadata={ "help": ( "The maximum length of an answer that can be generated. This is needed because the start " "and end predictions are not conditioned on one another." ) } , ) a = field( default=__UpperCAmelCase , metadata={"help": "Overwrite the cached training and evaluation sets"} ) a = field( default=__UpperCAmelCase , metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."} ) a = field( default=0.0 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) a = field( default=20 , metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."} ) a = field( default=0 , metadata={ "help": ( "language id of input for language-specific xlm models (see" " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)" ) } , ) a = field(default=1 , metadata={"help": "multiple threads for converting example to features"} ) class _snake_case ( __UpperCAmelCase ): """simple docstring""" a = "train" a = "dev" class _snake_case ( __UpperCAmelCase ): """simple docstring""" a = 42 a = 42 a = 42 a = 42 def __init__( self : Any , _A : Dict , _A : str , _A : List[str] = None , _A : List[str] = Split.train , _A : int = False , _A : Any = None , _A : Optional[Any] = "pt" , ): """simple docstring""" _SCREAMING_SNAKE_CASE : str = args _SCREAMING_SNAKE_CASE : Optional[Any] = is_language_sensitive _SCREAMING_SNAKE_CASE : Tuple = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor() if isinstance(_A , _A): try: _SCREAMING_SNAKE_CASE : List[Any] = Split[mode] except KeyError: raise KeyError("""mode is not a valid split name""") _SCREAMING_SNAKE_CASE : Union[str, Any] = mode # Load data features from cache or dataset file _SCREAMING_SNAKE_CASE : Union[str, Any] = """v2""" if args.version_2_with_negative else """v1""" _SCREAMING_SNAKE_CASE : int = os.path.join( cache_dir if cache_dir is not None else args.data_dir , f"""cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}""" , ) 
# Make sure only the first process in distributed training processes the dataset, # and the others will use the cache. _SCREAMING_SNAKE_CASE : List[str] = cached_features_file + """.lock""" with FileLock(_A): if os.path.exists(_A) and not args.overwrite_cache: _SCREAMING_SNAKE_CASE : Optional[Any] = time.time() _SCREAMING_SNAKE_CASE : List[str] = torch.load(_A) # Legacy cache files have only features, while new cache files # will have dataset and examples also. _SCREAMING_SNAKE_CASE : Optional[int] = self.old_features["""features"""] _SCREAMING_SNAKE_CASE : Tuple = self.old_features.get("""dataset""" , _A) _SCREAMING_SNAKE_CASE : Tuple = self.old_features.get("""examples""" , _A) logger.info( f"""Loading features from cached file {cached_features_file} [took %.3f s]""" , time.time() - start) if self.dataset is None or self.examples is None: logger.warning( f"""Deleting cached file {cached_features_file} will allow dataset and examples to be cached in""" """ future run""") else: if mode == Split.dev: _SCREAMING_SNAKE_CASE : List[Any] = self.processor.get_dev_examples(args.data_dir) else: _SCREAMING_SNAKE_CASE : Tuple = self.processor.get_train_examples(args.data_dir) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = squad_convert_examples_to_features( examples=self.examples , tokenizer=_A , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=_A , ) _SCREAMING_SNAKE_CASE : List[str] = time.time() torch.save( {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples} , _A , ) # ^ This seems to take a lot of time so I want to investigate why and how we can improve. logger.info( f"""Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]""") def __len__( self : str): """simple docstring""" return len(self.features) def __getitem__( self : int , _A : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = self.features[i] _SCREAMING_SNAKE_CASE : Any = torch.tensor(feature.input_ids , dtype=torch.long) _SCREAMING_SNAKE_CASE : str = torch.tensor(feature.attention_mask , dtype=torch.long) _SCREAMING_SNAKE_CASE : str = torch.tensor(feature.token_type_ids , dtype=torch.long) _SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(feature.cls_index , dtype=torch.long) _SCREAMING_SNAKE_CASE : Optional[int] = torch.tensor(feature.p_mask , dtype=torch.float) _SCREAMING_SNAKE_CASE : Dict = torch.tensor(feature.is_impossible , dtype=torch.float) _SCREAMING_SNAKE_CASE : List[str] = { """input_ids""": input_ids, """attention_mask""": attention_mask, """token_type_ids""": token_type_ids, } if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]: del inputs["token_type_ids"] if self.args.model_type in ["xlnet", "xlm"]: inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask}) if self.args.version_2_with_negative: inputs.update({"""is_impossible""": is_impossible}) if self.is_language_sensitive: inputs.update({"""langs""": (torch.ones(input_ids.shape , dtype=torch.intaa) * self.args.lang_id)}) if self.mode == Split.train: _SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor(feature.start_position , dtype=torch.long) _SCREAMING_SNAKE_CASE : Tuple = torch.tensor(feature.end_position , dtype=torch.long) inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions}) return inputs
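The caching idiom above, where one process builds the features while every other process blocks on a file lock and then reads the finished cache, is worth isolating. A minimal hedged sketch (load_or_build and its arguments are illustrative names, not part of the original module):

from filelock import FileLock

import torch


def load_or_build(cache_path: str, build_fn):
    # The first process to take the lock builds and saves the cache;
    # later processes block here, then simply load the finished file.
    with FileLock(cache_path + ".lock"):
        try:
            return torch.load(cache_path)
        except FileNotFoundError:
            features = build_fn()
            torch.save(features, cache_path)
            return features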
717
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class _snake_case : """simple docstring""" def __init__( self : int , _A : List[Any] , _A : int , _A : int): """simple docstring""" if dst_width < 0 or dst_height < 0: raise ValueError("""Destination width/height should be > 0""") _SCREAMING_SNAKE_CASE : str = img _SCREAMING_SNAKE_CASE : Optional[Any] = img.shape[1] _SCREAMING_SNAKE_CASE : Tuple = img.shape[0] _SCREAMING_SNAKE_CASE : Any = dst_width _SCREAMING_SNAKE_CASE : Any = dst_height _SCREAMING_SNAKE_CASE : Any = self.src_w / self.dst_w _SCREAMING_SNAKE_CASE : Dict = self.src_h / self.dst_h _SCREAMING_SNAKE_CASE : Optional[Any] = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 2_5_5 ) def _lowerCAmelCase ( self : Tuple): """simple docstring""" for i in range(self.dst_h): for j in range(self.dst_w): _SCREAMING_SNAKE_CASE : Any = self.img[self.get_y(_A)][self.get_x(_A)] def _lowerCAmelCase ( self : int , _A : int): """simple docstring""" return int(self.ratio_x * x) def _lowerCAmelCase ( self : str , _A : int): """simple docstring""" return int(self.ratio_y * y) if __name__ == "__main__": lowerCAmelCase_ , lowerCAmelCase_ = 800, 600 lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1) lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output ) waitKey(0) destroyAllWindows()
635
0
import os
import time

import numpy as np
import onnxruntime as ort

lowerCAmelCase_ = "1"
lowerCAmelCase_ = "0"
lowerCAmelCase_ = "1"

sess_opt = ort.SessionOptions()
sess_opt.graph_optimization_level = ort.GraphOptimizationLevel.ORT_DISABLE_ALL
print("Create inference session...")
execution_provider = ["TensorrtExecutionProvider", "CUDAExecutionProvider"]
sess = ort.InferenceSession("model.onnx", sess_options=sess_opt, providers=execution_provider)
run_opt = ort.RunOptions()

sequence = 128
batch = 1
input_ids = np.ones((batch, sequence), dtype=np.int64)
attention_mask = np.ones((batch, sequence), dtype=np.int64)
token_type_ids = np.ones((batch, sequence), dtype=np.int64)

print("Warm up phase...")
sess.run(
    None,
    {
        sess.get_inputs()[0].name: input_ids,
        sess.get_inputs()[1].name: attention_mask,
        sess.get_inputs()[2].name: token_type_ids,
    },
    run_options=run_opt,
)

print("Start inference...")
start_time = time.time()
max_iters = 2000
predict = {}
for iter in range(max_iters):
    predict = sess.run(
        None,
        {
            sess.get_inputs()[0].name: input_ids,
            sess.get_inputs()[1].name: attention_mask,
            sess.get_inputs()[2].name: token_type_ids,
        },
        run_options=run_opt,
    )

print("Average Inference Time = {:.3f} ms".format((time.time() - start_time) * 1000 / max_iters))
718
"""simple docstring""" import argparse from collections import defaultdict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines() _SCREAMING_SNAKE_CASE : Optional[Any] = F"""class {class_name}(""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{4 * " "}def {test_name}(""" _SCREAMING_SNAKE_CASE : Tuple = F"""{8 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{16 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Tuple = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : Any = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = 0 _SCREAMING_SNAKE_CASE : Dict = [] for line in lines: if line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = True elif in_class and line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = True elif in_class and in_func and (line.startswith(__SCREAMING_SNAKE_CASE ) or line.startswith(__SCREAMING_SNAKE_CASE )): _SCREAMING_SNAKE_CASE : Dict = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _SCREAMING_SNAKE_CASE : int = True if in_class and in_func and in_line: if ")" not in line: continue else: _SCREAMING_SNAKE_CASE : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _SCREAMING_SNAKE_CASE : Optional[int] = False else: new_lines.append(__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , """w""" ) as f: for line in new_lines: f.write(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None )-> Optional[Any]: if fail is not None: with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = {l.strip() for l in f.readlines()} else: _SCREAMING_SNAKE_CASE : str = None with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : str = f.readlines() _SCREAMING_SNAKE_CASE : str = defaultdict(__SCREAMING_SNAKE_CASE ) for line in correct_lines: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) lowerCAmelCase_ = parser.parse_args() main(args.correct_filename, args.fail_filename)
635
0
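The ONNX Runtime script above follows the usual latency-benchmarking pattern: one untimed warm-up run so session initialization and provider setup are excluded, then a timed loop divided by the iteration count. The pattern factors out into a small helper; a sketch, assuming `feed` maps input names to NumPy arrays (the `benchmark` helper is our own name, not part of the script):

import time


def benchmark(session, feed, warmup=10, iters=100):
    """Average latency in milliseconds over `iters` runs, after `warmup` untimed runs."""
    for _ in range(warmup):
        session.run(None, feed)
    start = time.perf_counter()
    for _ in range(iters):
        session.run(None, feed)
    return (time.perf_counter() - start) * 1000 / iters

Using `time.perf_counter()` rather than `time.time()` avoids clock-adjustment jitter in short timing loops.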
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : Optional[Any] = len(snake_case_ ), len(grid[0] ) if ( min(snake_case_ , snake_case_ ) < 0 or row == row_length or col == col_length or (row, col) in visit or grid[row][col] == 1 ): return 0 if row == row_length - 1 and col == col_length - 1: return 1 visit.add((row, col) ) _SCREAMING_SNAKE_CASE : Any = 0 count += depth_first_search(snake_case_ , row + 1 , snake_case_ , snake_case_ ) count += depth_first_search(snake_case_ , row - 1 , snake_case_ , snake_case_ ) count += depth_first_search(snake_case_ , snake_case_ , col + 1 , snake_case_ ) count += depth_first_search(snake_case_ , snake_case_ , col - 1 , snake_case_ ) visit.remove((row, col) ) return count if __name__ == "__main__": import doctest doctest.testmod()
719
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowerCAmelCase_ = { '''text_branch''': '''text_model''', '''audio_branch''': '''audio_model.audio_encoder''', '''attn''': '''attention.self''', '''self.proj''': '''output.dense''', '''attention.self_mask''': '''attn_mask''', '''mlp.fc1''': '''intermediate.dense''', '''mlp.fc2''': '''output.dense''', '''norm1''': '''layernorm_before''', '''norm2''': '''layernorm_after''', '''bn0''': '''batch_norm''', } lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''') def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = create_model( """HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : Optional[Any] = R""".*sequential.(\d+).*""" _SCREAMING_SNAKE_CASE : Any = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # replace sequential layers with list _SCREAMING_SNAKE_CASE : List[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) _SCREAMING_SNAKE_CASE : Dict = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.""" ) elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
_SCREAMING_SNAKE_CASE : Dict = 1 if projecton_layer == 0 else 2 _SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" ) if "audio" and "qkv" in key: # split qkv into query key and value _SCREAMING_SNAKE_CASE : Dict = value _SCREAMING_SNAKE_CASE : List[Any] = mixed_qkv.size(0 ) // 3 _SCREAMING_SNAKE_CASE : Optional[Any] = mixed_qkv[:qkv_dim] _SCREAMING_SNAKE_CASE : str = mixed_qkv[qkv_dim : qkv_dim * 2] _SCREAMING_SNAKE_CASE : Any = mixed_qkv[qkv_dim * 2 :] _SCREAMING_SNAKE_CASE : Dict = query_layer _SCREAMING_SNAKE_CASE : List[Any] = key_layer _SCREAMING_SNAKE_CASE : Dict = value_layer else: _SCREAMING_SNAKE_CASE : Optional[Any] = value return model_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> List[Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE ) clap_model.eval() _SCREAMING_SNAKE_CASE : Dict = clap_model.state_dict() _SCREAMING_SNAKE_CASE : Tuple = rename_state_dict(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = ClapConfig() _SCREAMING_SNAKE_CASE : Tuple = enable_fusion _SCREAMING_SNAKE_CASE : Dict = ClapModel(__SCREAMING_SNAKE_CASE ) # ignore the spectrogram embedding layer model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''') lowerCAmelCase_ = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
635
0
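The grid search above counts simple paths by backtracking: a cell is added to `visit` before recursing and removed afterwards, so other branches may pass through it again. A small usage sketch, assuming the corrected `depth_first_search` signature shown above:

# A 3x3 grid with one blocked cell (1 = blocked, 0 = free).
grid = [
    [0, 0, 0],
    [0, 1, 0],
    [0, 0, 0],
]
# Simple paths from the top-left to the bottom-right cell.
print(depth_first_search(grid, 0, 0, set()))  # prints 2: one path around each side of the blocked centre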
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { """microsoft/cvt-13""": """https://huggingface.co/microsoft/cvt-13/resolve/main/config.json""", # See all Cvt models at https://huggingface.co/models?filter=cvt } class _snake_case ( lowercase_ ): """simple docstring""" a = '''cvt''' def __init__( self : Optional[Any] , _A : Any=3 , _A : Optional[int]=[7, 3, 3] , _A : List[Any]=[4, 2, 2] , _A : int=[2, 1, 1] , _A : Optional[Any]=[6_4, 1_9_2, 3_8_4] , _A : Tuple=[1, 3, 6] , _A : List[str]=[1, 2, 1_0] , _A : List[Any]=[4.0, 4.0, 4.0] , _A : str=[0.0, 0.0, 0.0] , _A : Optional[Any]=[0.0, 0.0, 0.0] , _A : Optional[Any]=[0.0, 0.0, 0.1] , _A : Optional[int]=[True, True, True] , _A : Optional[Any]=[False, False, True] , _A : List[Any]=["dw_bn", "dw_bn", "dw_bn"] , _A : Union[str, Any]=[3, 3, 3] , _A : List[Any]=[1, 1, 1] , _A : List[str]=[2, 2, 2] , _A : Tuple=[1, 1, 1] , _A : Dict=[1, 1, 1] , _A : Union[str, Any]=0.02 , _A : Dict=1e-12 , **_A : Any , ): """simple docstring""" super().__init__(**_A) _SCREAMING_SNAKE_CASE : Tuple = num_channels _SCREAMING_SNAKE_CASE : List[str] = patch_sizes _SCREAMING_SNAKE_CASE : Dict = patch_stride _SCREAMING_SNAKE_CASE : Tuple = patch_padding _SCREAMING_SNAKE_CASE : List[Any] = embed_dim _SCREAMING_SNAKE_CASE : Union[str, Any] = num_heads _SCREAMING_SNAKE_CASE : List[Any] = depth _SCREAMING_SNAKE_CASE : Tuple = mlp_ratio _SCREAMING_SNAKE_CASE : List[Any] = attention_drop_rate _SCREAMING_SNAKE_CASE : Dict = drop_rate _SCREAMING_SNAKE_CASE : Optional[int] = drop_path_rate _SCREAMING_SNAKE_CASE : Union[str, Any] = qkv_bias _SCREAMING_SNAKE_CASE : Optional[Any] = cls_token _SCREAMING_SNAKE_CASE : int = qkv_projection_method _SCREAMING_SNAKE_CASE : List[str] = kernel_qkv _SCREAMING_SNAKE_CASE : int = padding_kv _SCREAMING_SNAKE_CASE : Any = stride_kv _SCREAMING_SNAKE_CASE : str = padding_q _SCREAMING_SNAKE_CASE : Dict = stride_q _SCREAMING_SNAKE_CASE : Dict = initializer_range _SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps
720
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , ) assert hasattr(self , """env""") def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1): """simple docstring""" return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]): """simple docstring""" TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""") def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.create_estimator() # run training estimator.fit() # result dataframe _SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""]) _SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _SCREAMING_SNAKE_CASE : int = ( Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy) assert all(t <= self.results["""eval_loss"""] for t in eval_loss) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
635
0
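Config classes like the CvT one above inherit `save_pretrained`/`from_pretrained` from `PretrainedConfig`, so custom fields round-trip through `config.json`. A minimal sketch using the released `CvtConfig` from transformers, which mirrors the class above (the output directory is illustrative):

from transformers import CvtConfig

cfg = CvtConfig(embed_dim=[64, 192, 384])
cfg.save_pretrained("/tmp/cvt-config")  # writes /tmp/cvt-config/config.json
reloaded = CvtConfig.from_pretrained("/tmp/cvt-config")
assert reloaded.embed_dim == [64, 192, 384]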
"""simple docstring""" import itertools import random import unittest import numpy as np from transformers import is_speech_available from transformers.testing_utils import require_torch, require_torchaudio from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin if is_speech_available(): from transformers import SpeechaTextFeatureExtractor lowerCAmelCase_ = random.Random() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None )-> List[Any]: if rng is None: _SCREAMING_SNAKE_CASE : List[Any] = global_rng _SCREAMING_SNAKE_CASE : List[str] = [] for batch_idx in range(shape[0] ): values.append([] ) for _ in range(shape[1] ): values[-1].append(rng.random() * scale ) return values @require_torch @require_torchaudio class _snake_case ( unittest.TestCase ): """simple docstring""" def __init__( self : List[Any] , _A : Tuple , _A : int=7 , _A : List[Any]=4_0_0 , _A : str=2_0_0_0 , _A : List[Any]=2_4 , _A : Tuple=2_4 , _A : Union[str, Any]=0.0 , _A : int=1_6_0_0_0 , _A : List[str]=True , _A : Union[str, Any]=True , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = parent _SCREAMING_SNAKE_CASE : Optional[int] = batch_size _SCREAMING_SNAKE_CASE : Tuple = min_seq_length _SCREAMING_SNAKE_CASE : Union[str, Any] = max_seq_length _SCREAMING_SNAKE_CASE : int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1) _SCREAMING_SNAKE_CASE : int = feature_size _SCREAMING_SNAKE_CASE : str = num_mel_bins _SCREAMING_SNAKE_CASE : Tuple = padding_value _SCREAMING_SNAKE_CASE : List[str] = sampling_rate _SCREAMING_SNAKE_CASE : int = return_attention_mask _SCREAMING_SNAKE_CASE : Dict = do_normalize def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" return { "feature_size": self.feature_size, "num_mel_bins": self.num_mel_bins, "padding_value": self.padding_value, "sampling_rate": self.sampling_rate, "return_attention_mask": self.return_attention_mask, "do_normalize": self.do_normalize, } def _lowerCAmelCase ( self : List[Any] , _A : Dict=False , _A : List[str]=False): """simple docstring""" def _flatten(_A : Dict): return list(itertools.chain(*_A)) if equal_length: _SCREAMING_SNAKE_CASE : Any = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)] else: # make sure that inputs increase in size _SCREAMING_SNAKE_CASE : Union[str, Any] = [ floats_list((x, self.feature_size)) for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff) ] if numpify: _SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(_A) for x in speech_inputs] return speech_inputs @require_torch @require_torchaudio class _snake_case ( __snake_case , unittest.TestCase ): """simple docstring""" a = SpeechaTextFeatureExtractor if is_speech_available() else None def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = SpeechaTextFeatureExtractionTester(self) def _lowerCAmelCase ( self : List[str] , _A : Tuple): """simple docstring""" self.assertTrue(np.all(np.mean(_A , axis=0) < 1e-3)) self.assertTrue(np.all(np.abs(np.var(_A , axis=0) - 1) < 1e-3)) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) # create three inputs of length 800, 1000, and 1200 _SCREAMING_SNAKE_CASE : Dict = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)] _SCREAMING_SNAKE_CASE : Dict = [np.asarray(_A) for speech_input 
in speech_inputs] # Test feature size _SCREAMING_SNAKE_CASE : Any = feature_extractor(_A , padding=_A , return_tensors="""np""").input_features self.assertTrue(input_features.ndim == 3) self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size) # Test not batched input _SCREAMING_SNAKE_CASE : str = feature_extractor(speech_inputs[0] , return_tensors="""np""").input_features _SCREAMING_SNAKE_CASE : str = feature_extractor(np_speech_inputs[0] , return_tensors="""np""").input_features self.assertTrue(np.allclose(_A , _A , atol=1e-3)) # Test batched _SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(_A , return_tensors="""np""").input_features _SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(_A , return_tensors="""np""").input_features for enc_seq_a, enc_seq_a in zip(_A , _A): self.assertTrue(np.allclose(_A , _A , atol=1e-3)) # Test 2-D numpy arrays are batched. _SCREAMING_SNAKE_CASE : Union[str, Any] = [floats_list((1, x))[0] for x in (8_0_0, 8_0_0, 8_0_0)] _SCREAMING_SNAKE_CASE : Optional[Any] = np.asarray(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(_A , return_tensors="""np""").input_features _SCREAMING_SNAKE_CASE : Dict = feature_extractor(_A , return_tensors="""np""").input_features for enc_seq_a, enc_seq_a in zip(_A , _A): self.assertTrue(np.allclose(_A , _A , atol=1e-3)) def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _SCREAMING_SNAKE_CASE : Tuple = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)] _SCREAMING_SNAKE_CASE : int = ["""longest""", """max_length""", """do_not_pad"""] _SCREAMING_SNAKE_CASE : Union[str, Any] = [None, 1_6, None] for max_length, padding in zip(_A , _A): _SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor( _A , padding=_A , max_length=_A , return_attention_mask=_A) _SCREAMING_SNAKE_CASE : List[str] = inputs.input_features _SCREAMING_SNAKE_CASE : Tuple = inputs.attention_mask _SCREAMING_SNAKE_CASE : List[str] = [np.sum(_A) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]]) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _SCREAMING_SNAKE_CASE : Any = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)] _SCREAMING_SNAKE_CASE : str = ["""longest""", """max_length""", """do_not_pad"""] _SCREAMING_SNAKE_CASE : Tuple = [None, 1_6, None] for max_length, padding in zip(_A , _A): _SCREAMING_SNAKE_CASE : Tuple = feature_extractor( _A , max_length=_A , padding=_A , return_tensors="""np""" , return_attention_mask=_A) _SCREAMING_SNAKE_CASE : List[Any] = inputs.input_features _SCREAMING_SNAKE_CASE : str = inputs.attention_mask _SCREAMING_SNAKE_CASE : int = [np.sum(_A) for x in attention_mask] self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]]) self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6) self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]]) self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6) self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]]) def _lowerCAmelCase ( self : str): """simple 
docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _SCREAMING_SNAKE_CASE : List[str] = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)] _SCREAMING_SNAKE_CASE : Dict = feature_extractor( _A , padding="""max_length""" , max_length=4 , truncation=_A , return_tensors="""np""" , return_attention_mask=_A , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs.input_features _SCREAMING_SNAKE_CASE : Optional[int] = inputs.attention_mask _SCREAMING_SNAKE_CASE : Any = np.sum(attention_mask == 1 , axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1]) self._check_zero_mean_unit_variance(input_features[2]) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _SCREAMING_SNAKE_CASE : int = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)] _SCREAMING_SNAKE_CASE : Optional[int] = feature_extractor( _A , padding="""longest""" , max_length=4 , truncation=_A , return_tensors="""np""" , return_attention_mask=_A , ) _SCREAMING_SNAKE_CASE : List[Any] = inputs.input_features _SCREAMING_SNAKE_CASE : Tuple = inputs.attention_mask _SCREAMING_SNAKE_CASE : Dict = np.sum(attention_mask == 1 , axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2]) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 4, 2_4)) _SCREAMING_SNAKE_CASE : Dict = [floats_list((1, x))[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0)] _SCREAMING_SNAKE_CASE : Tuple = feature_extractor( _A , padding="""longest""" , max_length=1_6 , truncation=_A , return_tensors="""np""" , return_attention_mask=_A , ) _SCREAMING_SNAKE_CASE : List[str] = inputs.input_features _SCREAMING_SNAKE_CASE : Optional[Any] = inputs.attention_mask _SCREAMING_SNAKE_CASE : str = np.sum(attention_mask == 1 , axis=1) self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]]) self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]]) self._check_zero_mean_unit_variance(input_features[2]) # make sure that if max_length < longest -> then pad to max_length self.assertEqual(input_features.shape , (3, 6, 2_4)) def _lowerCAmelCase ( self : List[str]): """simple docstring""" import torch _SCREAMING_SNAKE_CASE : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _SCREAMING_SNAKE_CASE : Dict = np.random.rand(1_0_0 , 3_2).astype(np.floataa) _SCREAMING_SNAKE_CASE : Any = np_speech_inputs.tolist() for inputs in [py_speech_inputs, np_speech_inputs]: _SCREAMING_SNAKE_CASE : int = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""") self.assertTrue(np_processed.input_features.dtype == np.floataa) _SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""") self.assertTrue(pt_processed.input_features.dtype == torch.floataa) def _lowerCAmelCase ( self : Tuple , _A : List[Any]): """simple docstring""" from datasets import load_dataset _SCREAMING_SNAKE_CASE : Any = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""") # automatic 
decoding with librispeech _SCREAMING_SNAKE_CASE : Union[str, Any] = ds.sort("""id""").select(range(_A))[:num_samples]["""audio"""] return [x["array"] for x in speech_samples] def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = np.array([ -1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241, -1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128, -1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625, ]) # fmt: on _SCREAMING_SNAKE_CASE : Dict = self._load_datasamples(1) _SCREAMING_SNAKE_CASE : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict()) _SCREAMING_SNAKE_CASE : Optional[Any] = feature_extractor(_A , return_tensors="""pt""").input_features self.assertEquals(input_features.shape , (1, 5_8_4, 2_4)) self.assertTrue(np.allclose(input_features[0, 0, :3_0] , _A , atol=1e-4))
721
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: return max(metric_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for gt in ground_truths ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Dict = [] if args.gold_data_mode == "qa": _SCREAMING_SNAKE_CASE : int = pd.read_csv(__SCREAMING_SNAKE_CASE , sep="""\t""" , header=__SCREAMING_SNAKE_CASE ) for answer_list in data[1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = ast.literal_eval(__SCREAMING_SNAKE_CASE ) answers.append(__SCREAMING_SNAKE_CASE ) else: _SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[int] = [[reference] for reference in references] _SCREAMING_SNAKE_CASE : Optional[int] = 0 for prediction, ground_truths in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): total += 1 em += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) fa += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = 1_00.0 * em / total _SCREAMING_SNAKE_CASE : Optional[Any] = 1_00.0 * fa / total logger.info(F"""F1: {fa:.2f}""" ) logger.info(F"""EM: {em:.2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = args.k _SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[Any] = 0 for hypo, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = set(hypo.split("""\t""" )[:k] ) _SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k _SCREAMING_SNAKE_CASE : int = 1_00.0 * em / total logger.info(F"""Precision@{k}: {em: .2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: def strip_title(__SCREAMING_SNAKE_CASE ): if title.startswith("""\"""" ): _SCREAMING_SNAKE_CASE : Optional[int] = title[1:] if title.endswith("""\"""" ): _SCREAMING_SNAKE_CASE : str = title[:-1] return title _SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , 
return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device ) _SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = question_enc_outputs[0] _SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever( __SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) _SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for docs in all_docs: _SCREAMING_SNAKE_CASE : str = [strip_title(__SCREAMING_SNAKE_CASE ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(__SCREAMING_SNAKE_CASE ) ) return provenance_strings def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.attention_mask.to(args.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) _SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) if args.print_predictions: for q, a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.info("""Q: {} - A: {}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) return answers def lowerCamelCase_()-> List[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__SCREAMING_SNAKE_CASE , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=__SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=__SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=__SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__SCREAMING_SNAKE_CASE , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""k for the 
precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=__SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=__SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=__SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=__SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) _SCREAMING_SNAKE_CASE : Dict = parser.parse_args() _SCREAMING_SNAKE_CASE : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : Union[str, Any] = {} if args.model_type is None: _SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration _SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs if args.index_name is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name if args.index_path is not None: _SCREAMING_SNAKE_CASE : Any = args.index_path else: _SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration _SCREAMING_SNAKE_CASE : int = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k _SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): 
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.retriever.init_retrieval() else: _SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: _SCREAMING_SNAKE_CASE : str = [] for line in tqdm(__SCREAMING_SNAKE_CASE ): questions.append(line.strip() ) if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size: _SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" ) preds_file.flush() _SCREAMING_SNAKE_CASE : Any = [] if len(__SCREAMING_SNAKE_CASE ) > 0: _SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) ) preds_file.flush() score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": lowerCAmelCase_ = get_args() main(args)
635
0
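The feature-extractor tests above repeatedly assert that normalized features are approximately zero-mean and unit-variance along each dimension. That check factors out into a helper; a sketch (the helper name is ours, not from the test file):

import numpy as np


def is_zero_mean_unit_var(features: np.ndarray, tol: float = 1e-3) -> bool:
    """Check that each feature dimension is (approximately) zero-mean and unit-variance."""
    zero_mean = np.all(np.abs(np.mean(features, axis=0)) < tol)
    unit_var = np.all(np.abs(np.var(features, axis=0) - 1) < tol)
    return bool(zero_mean and unit_var)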
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class _snake_case ( __snake_case ): """simple docstring""" a = "M-CLIP" def __init__( self : Optional[Any] , _A : List[str]=1_0_2_4 , _A : Union[str, Any]=7_6_8 , **_A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = transformerDimSize _SCREAMING_SNAKE_CASE : List[str] = imageDimSize super().__init__(**_A) class _snake_case ( __snake_case ): """simple docstring""" a = MCLIPConfig def __init__( self : Dict , _A : Optional[Any] , *_A : Any , **_A : Dict): """simple docstring""" super().__init__(_A , *_A , **_A) _SCREAMING_SNAKE_CASE : Tuple = XLMRobertaModel(_A) _SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims) def _lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.transformer(input_ids=_A , attention_mask=_A)[0] _SCREAMING_SNAKE_CASE : Optional[Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None] return self.LinearTransformation(_A), embs
700
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowerCamelCase_(__SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1_026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , )-> Union[str, Any]: set_seed(3 ) # generate train_data and objective_set _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? _SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model _SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE ) print("""computing perplexity on objective set""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item() print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE ) # collect igf pairs and save to file demo.jbl collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , )-> Optional[int]: set_seed(42 ) # Load pre-trained model _SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model _SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE ) # Train secondary learner _SCREAMING_SNAKE_CASE : Any = train_secondary_learner( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1 _SCREAMING_SNAKE_CASE : List[Any] = 0 
_SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) model.train() if secondary_learner is not None: secondary_learner.to(__SCREAMING_SNAKE_CASE ) secondary_learner.eval() _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : Optional[int] = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = [] _SCREAMING_SNAKE_CASE : int = [] # Compute the performance of the transformer model at the beginning _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) for epoch in range(int(__SCREAMING_SNAKE_CASE ) ): for step, example in enumerate(__SCREAMING_SNAKE_CASE ): torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 ) _SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() _SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = True if secondary_learner is not None: _SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward( torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item() observed_qs.append(float(__SCREAMING_SNAKE_CASE ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: _SCREAMING_SNAKE_CASE : Dict = -1 if predicted_q < threshold: _SCREAMING_SNAKE_CASE : List[str] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. 
if examples == batch_size: torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=( """decay the selectivity of our secondary learner filter from""" """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner _SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner _SCREAMING_SNAKE_CASE : int = training_secondary_learner( __SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model _SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets( context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
635
0
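The M-CLIP forward pass above mean-pools token embeddings with the attention mask so padding positions do not dilute the sentence vector. The same pooling as a standalone helper (the name and the `clamp` guard against all-padding rows are our additions):

import torch


def masked_mean_pool(embs: torch.Tensor, attention_mask: torch.Tensor) -> torch.Tensor:
    # embs: (batch, seq_len, dim); attention_mask: (batch, seq_len) with 1 for real tokens.
    mask = attention_mask.unsqueeze(-1).type_as(embs)  # (batch, seq_len, 1)
    return (embs * mask).sum(dim=1) / mask.sum(dim=1).clamp(min=1e-9)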
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''facebook/vit-mae-base''': '''https://huggingface.co/facebook/vit-mae-base/resolve/main/config.json''', # See all ViT MAE models at https://huggingface.co/models?filter=vit-mae } class _snake_case ( __snake_case ): """simple docstring""" a = "vit_mae" def __init__( self : Union[str, Any] , _A : List[Any]=7_6_8 , _A : Union[str, Any]=1_2 , _A : str=1_2 , _A : int=3_0_7_2 , _A : List[Any]="gelu" , _A : Dict=0.0 , _A : int=0.0 , _A : Optional[int]=0.02 , _A : Union[str, Any]=1e-12 , _A : List[str]=2_2_4 , _A : Dict=1_6 , _A : Any=3 , _A : Optional[int]=True , _A : List[Any]=1_6 , _A : Any=5_1_2 , _A : Any=8 , _A : int=2_0_4_8 , _A : Any=0.75 , _A : Tuple=False , **_A : List[str] , ): """simple docstring""" super().__init__(**_A) _SCREAMING_SNAKE_CASE : int = hidden_size _SCREAMING_SNAKE_CASE : str = num_hidden_layers _SCREAMING_SNAKE_CASE : Union[str, Any] = num_attention_heads _SCREAMING_SNAKE_CASE : Optional[int] = intermediate_size _SCREAMING_SNAKE_CASE : List[str] = hidden_act _SCREAMING_SNAKE_CASE : Dict = hidden_dropout_prob _SCREAMING_SNAKE_CASE : Tuple = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : List[Any] = initializer_range _SCREAMING_SNAKE_CASE : Optional[Any] = layer_norm_eps _SCREAMING_SNAKE_CASE : Optional[Any] = image_size _SCREAMING_SNAKE_CASE : Optional[Any] = patch_size _SCREAMING_SNAKE_CASE : Any = num_channels _SCREAMING_SNAKE_CASE : Optional[Any] = qkv_bias _SCREAMING_SNAKE_CASE : Optional[int] = decoder_num_attention_heads _SCREAMING_SNAKE_CASE : List[str] = decoder_hidden_size _SCREAMING_SNAKE_CASE : List[str] = decoder_num_hidden_layers _SCREAMING_SNAKE_CASE : Any = decoder_intermediate_size _SCREAMING_SNAKE_CASE : int = mask_ratio _SCREAMING_SNAKE_CASE : List[Any] = norm_pix_loss
701
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( __snake_case ): """simple docstring""" a = ["image_processor", "tokenizer"] a = "ChineseCLIPImageProcessor" a = ("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _A , ) _SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""") _SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""") if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""") super().__init__(_A , _A) _SCREAMING_SNAKE_CASE : Dict = self.image_processor def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int): """simple docstring""" if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""") if text is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A) if images is not None: _SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A) if text is not None and images is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A) , tensor_type=_A) def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any): """simple docstring""" return self.tokenizer.batch_decode(*_A , **_A) def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any): """simple docstring""" return self.tokenizer.decode(*_A , **_A) @property def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , ) return self.image_processor_class
635
0
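The ViT-MAE defaults above determine how much of the image the encoder actually sees: the image is split into `(image_size / patch_size)**2` patches and `mask_ratio` of them are hidden during pre-training. Worked out with the defaults:

image_size, patch_size, mask_ratio = 224, 16, 0.75

num_patches = (image_size // patch_size) ** 2  # 14 * 14 = 196
num_masked = int(mask_ratio * num_patches)     # 147 patches hidden from the encoder
num_visible = num_patches - num_masked         # 49 patches actually encoded
print(num_patches, num_masked, num_visible)    # 196 147 49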
"""simple docstring""" import copy import os from typing import Union from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ : Tuple = logging.get_logger(__name__) lowerCAmelCase_ : Dict = { '''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''', '''BridgeTower/bridgetower-base-itm-mlm''': ( '''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json''' ), } class _snake_case ( __snake_case ): """simple docstring""" a = "bridgetower_vision_model" def __init__( self : Union[str, Any] , _A : Union[str, Any]=7_6_8 , _A : Tuple=1_2 , _A : List[Any]=3 , _A : Dict=1_6 , _A : str=2_8_8 , _A : Optional[Any]=1 , _A : Union[str, Any]=1e-05 , _A : Any=False , _A : int=True , _A : Optional[int]=False , **_A : int , ): """simple docstring""" super().__init__(**_A) _SCREAMING_SNAKE_CASE : int = hidden_size _SCREAMING_SNAKE_CASE : int = num_hidden_layers _SCREAMING_SNAKE_CASE : Optional[int] = num_channels _SCREAMING_SNAKE_CASE : str = patch_size _SCREAMING_SNAKE_CASE : int = image_size _SCREAMING_SNAKE_CASE : Tuple = initializer_factor _SCREAMING_SNAKE_CASE : str = layer_norm_eps _SCREAMING_SNAKE_CASE : Optional[Any] = stop_gradient _SCREAMING_SNAKE_CASE : Any = share_layernorm _SCREAMING_SNAKE_CASE : int = remove_last_layer @classmethod def _lowerCAmelCase ( cls : Optional[int] , _A : Union[str, os.PathLike] , **_A : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(_A , **_A) if config_dict.get("""model_type""") == "bridgetower": _SCREAMING_SNAKE_CASE : List[str] = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. 
This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(_A , **_A) class _snake_case ( __snake_case ): """simple docstring""" a = "bridgetower_text_model" def __init__( self : Any , _A : int=5_0_2_6_5 , _A : Union[str, Any]=7_6_8 , _A : int=1_2 , _A : Tuple=1_2 , _A : Any=1 , _A : List[Any]=3_0_7_2 , _A : str="gelu" , _A : List[Any]=0.1 , _A : Union[str, Any]=0.1 , _A : List[str]=5_1_4 , _A : Union[str, Any]=1 , _A : str=1e-05 , _A : Dict=1 , _A : Union[str, Any]=0 , _A : Any=2 , _A : Dict="absolute" , _A : Dict=True , **_A : Any , ): """simple docstring""" super().__init__(**_A) _SCREAMING_SNAKE_CASE : int = vocab_size _SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size _SCREAMING_SNAKE_CASE : Any = num_hidden_layers _SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads _SCREAMING_SNAKE_CASE : int = hidden_act _SCREAMING_SNAKE_CASE : Optional[Any] = initializer_factor _SCREAMING_SNAKE_CASE : str = intermediate_size _SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob _SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings _SCREAMING_SNAKE_CASE : Optional[int] = type_vocab_size _SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps _SCREAMING_SNAKE_CASE : Any = position_embedding_type _SCREAMING_SNAKE_CASE : List[Any] = use_cache _SCREAMING_SNAKE_CASE : List[Any] = pad_token_id _SCREAMING_SNAKE_CASE : List[str] = bos_token_id _SCREAMING_SNAKE_CASE : Union[str, Any] = eos_token_id @classmethod def _lowerCAmelCase ( cls : int , _A : Union[str, os.PathLike] , **_A : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = cls.get_config_dict(_A , **_A) if config_dict.get("""model_type""") == "bridgetower": _SCREAMING_SNAKE_CASE : Any = config_dict["""text_config"""] if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type: logger.warning( f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """ f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""") return cls.from_dict(_A , **_A) class _snake_case ( __snake_case ): """simple docstring""" a = "bridgetower" def __init__( self : List[str] , _A : str=True , _A : Tuple="gelu" , _A : Optional[Any]=7_6_8 , _A : Dict=1 , _A : Tuple=1e-05 , _A : Dict=False , _A : Tuple="add" , _A : Tuple=1_2 , _A : Any=6 , _A : Union[str, Any]=False , _A : Dict=False , _A : str=None , _A : Optional[Any]=None , **_A : Optional[int] , ): """simple docstring""" _SCREAMING_SNAKE_CASE : str = kwargs.pop("""text_config_dict""" , _A) _SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("""vision_config_dict""" , _A) super().__init__(**_A) _SCREAMING_SNAKE_CASE : str = share_cross_modal_transformer_layers _SCREAMING_SNAKE_CASE : int = hidden_act _SCREAMING_SNAKE_CASE : Optional[int] = hidden_size _SCREAMING_SNAKE_CASE : Optional[int] = initializer_factor _SCREAMING_SNAKE_CASE : str = layer_norm_eps _SCREAMING_SNAKE_CASE : Dict = share_link_tower_layers _SCREAMING_SNAKE_CASE : Union[str, Any] = link_tower_type _SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads _SCREAMING_SNAKE_CASE : Dict = num_hidden_layers _SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings _SCREAMING_SNAKE_CASE : Any = init_layernorm_from_vision_encoder if text_config is None: _SCREAMING_SNAKE_CASE : Optional[Any] = {} logger.info("""`text_config` is `None`. 
Initializing the `BridgeTowerTextConfig` with default values.""") if vision_config is None: _SCREAMING_SNAKE_CASE : Tuple = {} logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""") _SCREAMING_SNAKE_CASE : Optional[Any] = BridgeTowerTextConfig(**_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = BridgeTowerVisionConfig(**_A) @classmethod def _lowerCAmelCase ( cls : str , _A : BridgeTowerTextConfig , _A : BridgeTowerVisionConfig , **_A : Union[str, Any]): """simple docstring""" return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = copy.deepcopy(self.__dict__) _SCREAMING_SNAKE_CASE : Tuple = self.text_config.to_dict() _SCREAMING_SNAKE_CASE : Dict = self.vision_config.to_dict() _SCREAMING_SNAKE_CASE : List[str] = self.__class__.model_type return output
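# --- Editor's sketch (added): composing the composite config from the two sub-configs via
# `from_text_vision_configs`, then round-tripping through `to_dict`; values are the defaults.
def _bridgetower_config_demo():
    text_cfg = BridgeTowerTextConfig()
    vision_cfg = BridgeTowerVisionConfig()
    config = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)
    assert config.to_dict()["text_config"]["hidden_size"] == 768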
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = ['''model.decoder.embed_positions.weights'''] def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: if "emb" in name: _SCREAMING_SNAKE_CASE : List[Any] = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: _SCREAMING_SNAKE_CASE : List[str] = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: _SCREAMING_SNAKE_CASE : int = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: _SCREAMING_SNAKE_CASE : Tuple = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: _SCREAMING_SNAKE_CASE : str = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple[Dict, Dict]: _SCREAMING_SNAKE_CASE : str = list(state_dict.keys() ) _SCREAMING_SNAKE_CASE : Tuple = {} for key in keys: _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = rename_keys(__SCREAMING_SNAKE_CASE ) if "in_proj_weight" in key: # split fused qkv proj _SCREAMING_SNAKE_CASE : str = val[:hidden_size, :] _SCREAMING_SNAKE_CASE : Any = val[hidden_size : 2 * hidden_size, :] _SCREAMING_SNAKE_CASE : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: _SCREAMING_SNAKE_CASE : int = val else: _SCREAMING_SNAKE_CASE : Dict = val return state_dict, enc_dec_proj_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> MusicgenDecoderConfig: if checkpoint == "small": # default config values _SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 _SCREAMING_SNAKE_CASE : str = 24 _SCREAMING_SNAKE_CASE : Any = 16 elif checkpoint == "medium": _SCREAMING_SNAKE_CASE : Dict = 1_536 _SCREAMING_SNAKE_CASE : Union[str, Any] = 48 _SCREAMING_SNAKE_CASE : Optional[Any] = 24 elif checkpoint == "large": _SCREAMING_SNAKE_CASE : List[Any] = 2_048 _SCREAMING_SNAKE_CASE : Optional[int] = 48 _SCREAMING_SNAKE_CASE : str = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = MusicgenDecoderConfig( hidden_size=__SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=__SCREAMING_SNAKE_CASE , 
num_attention_heads=__SCREAMING_SNAKE_CASE , ) return config @torch.no_grad() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="cpu" )-> str: _SCREAMING_SNAKE_CASE : str = MusicGen.get_pretrained(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = decoder_config_from_checkpoint(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = fairseq_model.lm.state_dict() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = rename_state_dict( __SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size ) _SCREAMING_SNAKE_CASE : Tuple = TaEncoderModel.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[Any] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) _SCREAMING_SNAKE_CASE : str = MusicgenForCausalLM(__SCREAMING_SNAKE_CASE ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model _SCREAMING_SNAKE_CASE : Dict = MusicgenForConditionalGeneration(text_encoder=__SCREAMING_SNAKE_CASE , audio_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__SCREAMING_SNAKE_CASE ) # check we can do a forward pass _SCREAMING_SNAKE_CASE : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) _SCREAMING_SNAKE_CASE : Dict = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE ).logits if logits.shape != (8, 1, 2_048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) _SCREAMING_SNAKE_CASE : Optional[int] = MusicgenProcessor(feature_extractor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE ) # set the appropriate bos/pad token ids _SCREAMING_SNAKE_CASE : Optional[Any] = 2_048 _SCREAMING_SNAKE_CASE : List[Any] = 2_048 # set other default generation config params _SCREAMING_SNAKE_CASE : Any = int(30 * audio_encoder.config.frame_rate ) _SCREAMING_SNAKE_CASE : Tuple = True _SCREAMING_SNAKE_CASE : int = 3.0 if pytorch_dump_folder is not None: Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(__SCREAMING_SNAKE_CASE ) processor.push_to_hub(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint''', 
default='''small''', type=str, help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''', ) parser.add_argument( '''--pytorch_dump_folder''', required=True, default=None, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) parser.add_argument( '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.''' ) lowerCAmelCase_ = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
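# --- Editor's note (added): a hedged example of invoking the conversion script above from the
# shell; the script filename and output path are placeholders, and `--checkpoint` must be one
# of small/medium/large.
#
#   python convert_musicgen.py \
#       --checkpoint small \
#       --pytorch_dump_folder ./musicgen-small-converted \
#       --device cpu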
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''', # See all SEW models at https://huggingface.co/models?filter=sew } class _snake_case ( __snake_case ): """simple docstring""" a = "sew" def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ): """simple docstring""" super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A) _SCREAMING_SNAKE_CASE : str = hidden_size _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation _SCREAMING_SNAKE_CASE : Dict = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : str = conv_bias _SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings _SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups _SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim) _SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers _SCREAMING_SNAKE_CASE : List[str] = intermediate_size _SCREAMING_SNAKE_CASE : str = squeeze_factor _SCREAMING_SNAKE_CASE : Dict = hidden_act _SCREAMING_SNAKE_CASE : str = num_attention_heads _SCREAMING_SNAKE_CASE : Dict = hidden_dropout _SCREAMING_SNAKE_CASE : Tuple = attention_dropout _SCREAMING_SNAKE_CASE : int = activation_dropout _SCREAMING_SNAKE_CASE : Any = feat_proj_dropout _SCREAMING_SNAKE_CASE : str = final_dropout _SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop _SCREAMING_SNAKE_CASE : Any = layer_norm_eps _SCREAMING_SNAKE_CASE : int = initializer_range _SCREAMING_SNAKE_CASE : List[Any] = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment _SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob _SCREAMING_SNAKE_CASE : List[str] = mask_time_length _SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks 
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob _SCREAMING_SNAKE_CASE : int = mask_feature_length _SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks # ctc loss _SCREAMING_SNAKE_CASE : int = ctc_loss_reduction _SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity # sequence classification _SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum _SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size @property def _lowerCAmelCase ( self : Any): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1)
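# --- Editor's sketch (added): the final property above multiplies the convolutional strides,
# i.e. the feature encoder's total temporal downsampling. With the default strides this is
# 5 * 2**6 = 320 input samples per output frame.
import functools
import operator

_default_strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul, _default_strides, 1) == 320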
"""simple docstring""" import argparse import json from pathlib import Path import requests import torch from huggingface_hub import cached_download, hf_hub_download, hf_hub_url from PIL import Image from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : Tuple = SwinConfig( embed_dim=192 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["""stage2""", """stage3""", """stage4"""] , ) _SCREAMING_SNAKE_CASE : Optional[Any] = DetaConfig( backbone_config=__SCREAMING_SNAKE_CASE , num_queries=900 , encoder_ffn_dim=2_048 , decoder_ffn_dim=2_048 , num_feature_levels=5 , assign_first_stage=__SCREAMING_SNAKE_CASE , with_box_refine=__SCREAMING_SNAKE_CASE , two_stage=__SCREAMING_SNAKE_CASE , ) # set labels _SCREAMING_SNAKE_CASE : List[Any] = """huggingface/label-files""" if "o365" in model_name: _SCREAMING_SNAKE_CASE : Tuple = 366 _SCREAMING_SNAKE_CASE : List[Any] = """object365-id2label.json""" else: _SCREAMING_SNAKE_CASE : Any = 91 _SCREAMING_SNAKE_CASE : Optional[Any] = """coco-detection-id2label.json""" _SCREAMING_SNAKE_CASE : Union[str, Any] = num_labels _SCREAMING_SNAKE_CASE : List[str] = json.load(open(cached_download(hf_hub_url(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) ) , """r""" ) ) _SCREAMING_SNAKE_CASE : Any = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} _SCREAMING_SNAKE_CASE : int = idalabel _SCREAMING_SNAKE_CASE : Union[str, Any] = {v: k for k, v in idalabel.items()} return config def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Dict: _SCREAMING_SNAKE_CASE : Dict = [] # stem # fmt: off rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) 
rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.norm2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.reduction.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.weight""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((F"""backbone.0.body.layers.{i}.downsample.norm.bias""", F"""model.backbone.model.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") ) rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") ) rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") ) rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") ) rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") ) rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") ) # transformer encoder for i in range(config.encoder_layers ): rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias""", F"""model.encoder.layers.{i}.self_attn.sampling_offsets.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.weight""", F"""model.encoder.layers.{i}.self_attn.attention_weights.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.attention_weights.bias""", F"""model.encoder.layers.{i}.self_attn.attention_weights.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.weight""", F"""model.encoder.layers.{i}.self_attn.value_proj.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.value_proj.bias""", F"""model.encoder.layers.{i}.self_attn.value_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.weight""", F"""model.encoder.layers.{i}.self_attn.output_proj.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.self_attn.output_proj.bias""", F"""model.encoder.layers.{i}.self_attn.output_proj.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.weight""", 
F"""model.encoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm1.bias""", F"""model.encoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.weight""", F"""model.encoder.layers.{i}.fc1.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear1.bias""", F"""model.encoder.layers.{i}.fc1.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.weight""", F"""model.encoder.layers.{i}.fc2.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.linear2.bias""", F"""model.encoder.layers.{i}.fc2.bias""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.weight""", F"""model.encoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((F"""transformer.encoder.layers.{i}.norm2.bias""", F"""model.encoder.layers.{i}.final_layer_norm.bias""") ) # transformer decoder for i in range(config.decoder_layers ): rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias""", F"""model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.weight""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.attention_weights.bias""", F"""model.decoder.layers.{i}.encoder_attn.attention_weights.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.value_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.value_proj.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.weight""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.cross_attn.output_proj.bias""", F"""model.decoder.layers.{i}.encoder_attn.output_proj.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.weight""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm1.bias""", F"""model.decoder.layers.{i}.encoder_attn_layer_norm.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""", F"""model.decoder.layers.{i}.self_attn.out_proj.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", F"""model.decoder.layers.{i}.self_attn.out_proj.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.weight""", F"""model.decoder.layers.{i}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm2.bias""", F"""model.decoder.layers.{i}.self_attn_layer_norm.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.weight""", F"""model.decoder.layers.{i}.fc1.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear1.bias""", F"""model.decoder.layers.{i}.fc1.bias""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.weight""", F"""model.decoder.layers.{i}.fc2.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.linear2.bias""", F"""model.decoder.layers.{i}.fc2.bias""") 
) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.weight""", F"""model.decoder.layers.{i}.final_layer_norm.weight""") ) rename_keys.append((F"""transformer.decoder.layers.{i}.norm3.bias""", F"""model.decoder.layers.{i}.final_layer_norm.bias""") ) # fmt: on return rename_keys def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : Optional[int] = dct.pop(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = val def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : int = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _SCREAMING_SNAKE_CASE : Optional[Any] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = state_dict.pop(F"""backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[:dim, :] _SCREAMING_SNAKE_CASE : Any = in_proj_bias[: dim] _SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[ dim : dim * 2, : ] _SCREAMING_SNAKE_CASE : Any = in_proj_bias[ dim : dim * 2 ] _SCREAMING_SNAKE_CASE : Union[str, Any] = in_proj_weight[ -dim :, : ] _SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[-dim :] # fmt: on def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: # transformer decoder self-attention layers _SCREAMING_SNAKE_CASE : int = config.d_model for i in range(config.decoder_layers ): # read in weights + bias of input projection layer of self-attention _SCREAMING_SNAKE_CASE : List[Any] = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_weight""" ) _SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(F"""transformer.decoder.layers.{i}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE : Any = in_proj_weight[:hidden_size, :] _SCREAMING_SNAKE_CASE : str = in_proj_bias[:hidden_size] _SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[ hidden_size : hidden_size * 2, : ] _SCREAMING_SNAKE_CASE : int = in_proj_bias[hidden_size : hidden_size * 2] _SCREAMING_SNAKE_CASE : str = in_proj_weight[-hidden_size:, :] _SCREAMING_SNAKE_CASE : str = in_proj_bias[-hidden_size:] def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Any = """http://images.cocodataset.org/val2017/000000039769.jpg""" _SCREAMING_SNAKE_CASE : Dict = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = get_deta_config(__SCREAMING_SNAKE_CASE ) # load original state dict if model_name == "deta-swin-large": _SCREAMING_SNAKE_CASE : str = hf_hub_download(repo_id="""nielsr/deta-checkpoints""" , filename="""adet_swin_ft.pth""" ) elif model_name == "deta-swin-large-o365": _SCREAMING_SNAKE_CASE : List[Any] = hf_hub_download(repo_id="""jozhang97/deta-swin-l-o365""" , filename="""deta_swin_pt_o365.pth""" ) else: raise ValueError(F"""Model name {model_name} not supported""" ) _SCREAMING_SNAKE_CASE : List[Any] = 
torch.load(__SCREAMING_SNAKE_CASE , map_location="""cpu""" )["""model"""] # original state dict for name, param in state_dict.items(): print(__SCREAMING_SNAKE_CASE , param.shape ) # rename keys _SCREAMING_SNAKE_CASE : Tuple = create_rename_keys(__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_swin_q_k_v(__SCREAMING_SNAKE_CASE , config.backbone_config ) read_in_decoder_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # fix some prefixes for key in state_dict.copy().keys(): if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key: _SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : str = val if "input_proj" in key: _SCREAMING_SNAKE_CASE : int = state_dict.pop(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = val if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key: _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = val # finally, create HuggingFace model and load state dict _SCREAMING_SNAKE_CASE : int = DetaForObjectDetection(__SCREAMING_SNAKE_CASE ) model.load_state_dict(__SCREAMING_SNAKE_CASE ) model.eval() _SCREAMING_SNAKE_CASE : Dict = """cuda""" if torch.cuda.is_available() else """cpu""" model.to(__SCREAMING_SNAKE_CASE ) # load image processor _SCREAMING_SNAKE_CASE : Dict = DetaImageProcessor(format="""coco_detection""" ) # verify our conversion on image _SCREAMING_SNAKE_CASE : Any = prepare_img() _SCREAMING_SNAKE_CASE : Union[str, Any] = processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) _SCREAMING_SNAKE_CASE : Tuple = encoding["""pixel_values"""] _SCREAMING_SNAKE_CASE : Optional[Any] = model(pixel_values.to(__SCREAMING_SNAKE_CASE ) ) # verify logits print("""Logits:""" , outputs.logits[0, :3, :3] ) print("""Boxes:""" , outputs.pred_boxes[0, :3, :3] ) if model_name == "deta-swin-large": _SCREAMING_SNAKE_CASE : Tuple = torch.tensor( [[-7.63_08, -2.84_85, -5.37_37], [-7.20_37, -4.55_05, -4.80_27], [-7.29_43, -4.26_11, -4.66_17]] ) _SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.49_87, 0.49_69, 0.99_99], [0.25_49, 0.54_98, 0.48_05], [0.54_98, 0.27_57, 0.05_69]] ) elif model_name == "deta-swin-large-o365": _SCREAMING_SNAKE_CASE : str = torch.tensor( [[-8.01_22, -3.57_20, -4.97_17], [-8.15_47, -3.68_86, -4.63_89], [-7.66_10, -3.61_94, -5.01_34]] ) _SCREAMING_SNAKE_CASE : Dict = torch.tensor([[0.25_23, 0.55_49, 0.48_81], [0.77_15, 0.41_49, 0.46_01], [0.55_03, 0.27_53, 0.05_75]] ) assert torch.allclose(outputs.logits[0, :3, :3] , expected_logits.to(__SCREAMING_SNAKE_CASE ) , atol=1e-4 ) assert torch.allclose(outputs.pred_boxes[0, :3, :3] , expected_boxes.to(__SCREAMING_SNAKE_CASE ) , atol=1e-4 ) print("""Everything ok!""" ) if pytorch_dump_folder_path: # Save model and processor logger.info(F"""Saving PyTorch model and processor to {pytorch_dump_folder_path}...""" ) Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) processor.save_pretrained(__SCREAMING_SNAKE_CASE ) # Push to hub if push_to_hub: print("""Pushing model and processor to hub...""" ) model.push_to_hub(F"""jozhang97/{model_name}""" ) processor.push_to_hub(F"""jozhang97/{model_name}""" ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument( '''--model_name''', type=str, default='''deta-swin-large''', choices=['''deta-swin-large''', 
'''deta-swin-large-o365'''], help='''Name of the model you\'d like to convert.''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.''', ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCAmelCase_ = parser.parse_args() convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
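# --- Editor's sketch (added): the key-renaming step the conversion above applies to every
# checkpoint entry, shown on a toy state dict (pop the old key, re-insert under the new name).
def _rename_demo():
    toy = {"backbone.0.body.patch_embed.proj.weight": "tensor"}
    val = toy.pop("backbone.0.body.patch_embed.proj.weight")
    toy["model.backbone.model.embeddings.patch_embeddings.projection.weight"] = val
    assert list(toy.values()) == ["tensor"]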
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''', '''UniSpeechForCTC''', '''UniSpeechForPreTraining''', '''UniSpeechForSequenceClassification''', '''UniSpeechModel''', '''UniSpeechPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import inspect import warnings from typing import Any, Dict, Optional, Union from packaging import version def lowerCamelCase_(*__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE=2 )-> Dict: from .. import __version__ _SCREAMING_SNAKE_CASE : int = take_from _SCREAMING_SNAKE_CASE : Union[str, Any] = () if not isinstance(args[0] , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : int = (args,) for attribute, version_name, message in args: if version.parse(version.parse(__SCREAMING_SNAKE_CASE ).base_version ) >= version.parse(__SCREAMING_SNAKE_CASE ): raise ValueError( F"""The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'""" F""" version {__version__} is >= {version_name}""" ) _SCREAMING_SNAKE_CASE : str = None if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and attribute in deprecated_kwargs: values += (deprecated_kwargs.pop(__SCREAMING_SNAKE_CASE ),) _SCREAMING_SNAKE_CASE : str = F"""The `{attribute}` argument is deprecated and will be removed in version {version_name}.""" elif hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): values += (getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ),) _SCREAMING_SNAKE_CASE : Optional[int] = F"""The `{attribute}` attribute is deprecated and will be removed in version {version_name}.""" elif deprecated_kwargs is None: _SCREAMING_SNAKE_CASE : Tuple = F"""`{attribute}` is deprecated and will be removed in version {version_name}.""" if warning is not None: _SCREAMING_SNAKE_CASE : int = warning + """ """ if standard_warn else """""" warnings.warn(warning + message , __SCREAMING_SNAKE_CASE , stacklevel=__SCREAMING_SNAKE_CASE ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) and len(__SCREAMING_SNAKE_CASE ) > 0: _SCREAMING_SNAKE_CASE : Union[str, Any] = inspect.getouterframes(inspect.currentframe() )[1] _SCREAMING_SNAKE_CASE : Dict = call_frame.filename _SCREAMING_SNAKE_CASE : Union[str, Any] = call_frame.lineno _SCREAMING_SNAKE_CASE : str = call_frame.function _SCREAMING_SNAKE_CASE : List[Any] = next(iter(deprecated_kwargs.items() ) ) raise TypeError(F"""{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`""" ) if len(__SCREAMING_SNAKE_CASE ) == 0: return elif len(__SCREAMING_SNAKE_CASE ) == 1: return values[0] return values
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : int = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : List[Any] = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str: if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = parquet_path elif 
issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] _SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=("train",) )-> Union[str, Any]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for split in splits: _SCREAMING_SNAKE_CASE : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: _SCREAMING_SNAKE_CASE : Optional[int] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[str] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : int = ParquetDatasetReader({"""train""": parquet_path} , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: if split: _SCREAMING_SNAKE_CASE : Union[str, Any] = {split: parquet_path} else: _SCREAMING_SNAKE_CASE : Optional[int] = """train""" _SCREAMING_SNAKE_CASE : Any = {"""train""": parquet_path, """test""": parquet_path} _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , 
cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / """foo.parquet""" ) _SCREAMING_SNAKE_CASE : str = pf.read() assert dataset.data.table == output_table def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Dict = str(shared_datadir / """test_image_rgb.jpg""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = {"""image""": [image_path]} _SCREAMING_SNAKE_CASE : Optional[Any] = Features({"""image""": Image()} ) _SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__SCREAMING_SNAKE_CASE ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: assert get_writer_batch_size(__SCREAMING_SNAKE_CASE ) == expected
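# --- Editor's sketch (added): the write/read round trip the tests above exercise, in miniature;
# `tmp_path` is a placeholder directory.
def _parquet_roundtrip_demo(tmp_path):
    ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
    assert ParquetDatasetWriter(ds, tmp_path / "demo.parquet").write() > 0
    reloaded = ParquetDatasetReader(str(tmp_path / "demo.parquet")).read()
    assert reloaded.column_names == ["col_1", "col_2"]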
"""simple docstring""" from __future__ import annotations def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> tuple: if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative in a semiconductor""" ) elif hole_conc < 0: raise ValueError("""Hole concentration cannot be negative in a semiconductor""" ) elif intrinsic_conc < 0: raise ValueError( """Intrinsic concentration cannot be negative in a semiconductor""" ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""only integers accepted as input""" ) else: _SCREAMING_SNAKE_CASE : List[Any] = str(abs(__SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : List[str] = [list(__SCREAMING_SNAKE_CASE ) for char in range(len(__SCREAMING_SNAKE_CASE ) )] for index in range(len(__SCREAMING_SNAKE_CASE ) ): num_transpositions[index].pop(__SCREAMING_SNAKE_CASE ) return max( int("""""".join(list(__SCREAMING_SNAKE_CASE ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('''doctest''').testmod()
"""simple docstring""" import os import tempfile import unittest from pathlib import Path from transformers import AutoConfig, is_tf_available from transformers.testing_utils import require_tf if is_tf_available(): import tensorflow as tf from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments @require_tf class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int] , _A : List[Any]): """simple docstring""" for model_result in results.values(): for batch_size, sequence_length in zip(model_result["""bs"""] , model_result["""ss"""]): _SCREAMING_SNAKE_CASE : Union[str, Any] = model_result["""result"""][batch_size][sequence_length] self.assertIsNotNone(_A) def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = """sshleifer/tiny-gpt2""" _SCREAMING_SNAKE_CASE : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_A , multi_process=_A , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = TensorFlowBenchmark(_A) _SCREAMING_SNAKE_CASE : Any = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = """sgugger/tiny-distilbert-classification""" _SCREAMING_SNAKE_CASE : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , only_pretrain_model=_A , ) _SCREAMING_SNAKE_CASE : Dict = TensorFlowBenchmark(_A) _SCREAMING_SNAKE_CASE : Dict = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = """sshleifer/tiny-gpt2""" _SCREAMING_SNAKE_CASE : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) _SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmark(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = """sshleifer/tiny-gpt2""" _SCREAMING_SNAKE_CASE : Dict = AutoConfig.from_pretrained(_A) _SCREAMING_SNAKE_CASE : int = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , eager_mode=_A , multi_process=_A , ) _SCREAMING_SNAKE_CASE : Optional[Any] = TensorFlowBenchmark(_A , [config]) _SCREAMING_SNAKE_CASE : Tuple = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = """sshleifer/tiny-gpt2""" _SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(_A) _SCREAMING_SNAKE_CASE : Tuple = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) _SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmark(_A , [config]) _SCREAMING_SNAKE_CASE : List[str] = benchmark.run() 
self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : str = """sshleifer/tiny-gpt2""" _SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) _SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmark(_A) _SCREAMING_SNAKE_CASE : Any = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = """sshleifer/tiny-gpt2""" _SCREAMING_SNAKE_CASE : List[str] = AutoConfig.from_pretrained(_A) _SCREAMING_SNAKE_CASE : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) _SCREAMING_SNAKE_CASE : str = TensorFlowBenchmark(_A , [config]) _SCREAMING_SNAKE_CASE : str = benchmark.run() self.check_results_dict_not_empty(results.time_train_result) self.check_results_dict_not_empty(results.memory_train_result) def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = """patrickvonplaten/t5-tiny-random""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoConfig.from_pretrained(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , ) _SCREAMING_SNAKE_CASE : List[str] = TensorFlowBenchmark(_A , configs=[config]) _SCREAMING_SNAKE_CASE : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) @unittest.skipIf(is_tf_available() and len(tf.config.list_physical_devices("""GPU""")) == 0 , """Cannot do xla on CPU.""") def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = """sshleifer/tiny-gpt2""" _SCREAMING_SNAKE_CASE : Dict = TensorFlowBenchmarkArguments( models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , use_xla=_A , multi_process=_A , ) _SCREAMING_SNAKE_CASE : Optional[int] = TensorFlowBenchmark(_A) _SCREAMING_SNAKE_CASE : str = benchmark.run() self.check_results_dict_not_empty(results.time_inference_result) self.check_results_dict_not_empty(results.memory_inference_result) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : int = """sshleifer/tiny-gpt2""" with tempfile.TemporaryDirectory() as tmp_dir: _SCREAMING_SNAKE_CASE : Optional[int] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=_A , save_to_csv=_A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_A , """inf_time.csv""") , inference_memory_csv_file=os.path.join(_A , """inf_mem.csv""") , env_info_csv_file=os.path.join(_A , """env.csv""") , multi_process=_A , ) _SCREAMING_SNAKE_CASE : Optional[Any] = TensorFlowBenchmark(_A) benchmark.run() self.assertTrue(Path(os.path.join(_A , """inf_time.csv""")).exists()) self.assertTrue(Path(os.path.join(_A , """inf_mem.csv""")).exists()) self.assertTrue(Path(os.path.join(_A , """env.csv""")).exists()) def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = """sshleifer/tiny-gpt2""" def _check_summary_is_not_empty(_A : 
Any): self.assertTrue(hasattr(_A , """sequential""")) self.assertTrue(hasattr(_A , """cumulative""")) self.assertTrue(hasattr(_A , """current""")) self.assertTrue(hasattr(_A , """total""")) with tempfile.TemporaryDirectory() as tmp_dir: _SCREAMING_SNAKE_CASE : List[Any] = TensorFlowBenchmarkArguments( models=[MODEL_ID] , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_A , """log.txt""") , log_print=_A , trace_memory_line_by_line=_A , eager_mode=_A , multi_process=_A , ) _SCREAMING_SNAKE_CASE : Any = TensorFlowBenchmark(_A) _SCREAMING_SNAKE_CASE : Tuple = benchmark.run() _check_summary_is_not_empty(result.inference_summary) self.assertTrue(Path(os.path.join(_A , """log.txt""")).exists())
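For reference, a minimal sketch of driving the benchmark API outside the test harness; the tiny checkpoint and flag values are illustrative assumptions taken from the tests above, not fixed choices.

# Minimal sketch: run a TF inference benchmark and inspect the results.
from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

args = TensorFlowBenchmarkArguments(
    models=["sshleifer/tiny-gpt2"],  # assumed tiny checkpoint, as in the tests
    training=False,
    inference=True,
    sequence_lengths=[8],
    batch_sizes=[1],
    multi_process=False,
)
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()
print(results.time_inference_result)
print(results.memory_inference_result)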
707
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Dict = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : str = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : List[Any] = -1 _SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0]) _SCREAMING_SNAKE_CASE : List[Any] = TextIteratorStreamer(_A) _SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[Any] = Thread(target=model.generate , kwargs=_A) thread.start() _SCREAMING_SNAKE_CASE : Any = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(_A , _A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : str = greedy_ids[:, input_ids.shape[1] :] _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(new_greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A , skip_prompt=_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : Optional[int] = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""distilgpt2""") _SCREAMING_SNAKE_CASE : Optional[Any] = 
AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(_A) _SCREAMING_SNAKE_CASE : int = -1 _SCREAMING_SNAKE_CASE : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Optional[int] = TextStreamer(_A , skip_special_tokens=_A) model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _SCREAMING_SNAKE_CASE : Optional[Any] = cs.out[:-1] # Remove the final "\n" _SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""pt""") self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1)) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Tuple = -1 _SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(_A , timeout=0.001) _SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[str] = Thread(target=model.generate , kwargs=_A) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_A): _SCREAMING_SNAKE_CASE : str = """""" for new_text in streamer: streamer_text += new_text
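A minimal sketch of the streaming pattern these tests exercise: generation runs in a background thread while the streamer is consumed as an iterator. The checkpoint and prompt are illustrative.

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tokenizer(["A test prompt"], return_tensors="pt")

streamer = TextIteratorStreamer(tokenizer)
generation_kwargs = dict(**inputs, max_new_tokens=10, do_sample=False, streamer=streamer)
Thread(target=model.generate, kwargs=generation_kwargs).start()

# Consume tokens as they are produced; iteration ends when generation finishes.
text = ""
for new_text in streamer:
    text += new_text
print(text)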
635
0
"""simple docstring""" import numpy as np from cva import COLOR_BGR2GRAY, cvtColor, imread from numpy import array, uinta from PIL import Image from digital_image_processing import change_contrast as cc from digital_image_processing import convert_to_negative as cn from digital_image_processing import sepia as sp from digital_image_processing.dithering import burkes as bs from digital_image_processing.edge_detection import canny from digital_image_processing.filters import convolve as conv from digital_image_processing.filters import gaussian_filter as gg from digital_image_processing.filters import local_binary_pattern as lbp from digital_image_processing.filters import median_filter as med from digital_image_processing.filters import sobel_filter as sob from digital_image_processing.resize import resize as rs lowerCAmelCase_ = imread(R'''digital_image_processing/image_data/lena_small.jpg''') lowerCAmelCase_ = cvtColor(img, COLOR_BGR2GRAY) def lowerCamelCase_()-> str: _SCREAMING_SNAKE_CASE : int = cn.convert_to_negative(__SCREAMING_SNAKE_CASE ) # assert negative_img array for at least one True assert negative_img.any() def lowerCamelCase_()-> Dict: with Image.open("""digital_image_processing/image_data/lena_small.jpg""" ) as img: # Work around assertion for response assert str(cc.change_contrast(__SCREAMING_SNAKE_CASE , 110 ) ).startswith( """<PIL.Image.Image image mode=RGB size=100x100 at""" ) def lowerCamelCase_()-> Any: _SCREAMING_SNAKE_CASE : List[Any] = canny.gen_gaussian_kernel(9 , sigma=1.4 ) # Assert ambiguous array assert resp.all() def lowerCamelCase_()-> int: _SCREAMING_SNAKE_CASE : str = imread("""digital_image_processing/image_data/lena_small.jpg""" , 0 ) # assert ambiguous array for all == True assert canny_img.all() _SCREAMING_SNAKE_CASE : Dict = canny.canny(__SCREAMING_SNAKE_CASE ) # assert canny array for at least one True assert canny_array.any() def lowerCamelCase_()-> List[str]: assert gg.gaussian_filter(__SCREAMING_SNAKE_CASE , 5 , sigma=0.9 ).all() def lowerCamelCase_()-> List[str]: # laplace diagonals _SCREAMING_SNAKE_CASE : Dict = array([[0.25, 0.5, 0.25], [0.5, -3, 0.5], [0.25, 0.5, 0.25]] ) _SCREAMING_SNAKE_CASE : int = conv.img_convolve(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).astype(__SCREAMING_SNAKE_CASE ) assert res.any() def lowerCamelCase_()-> str: assert med.median_filter(__SCREAMING_SNAKE_CASE , 3 ).any() def lowerCamelCase_()-> int: _SCREAMING_SNAKE_CASE : Union[str, Any] = sob.sobel_filter(__SCREAMING_SNAKE_CASE ) assert grad.any() and theta.any() def lowerCamelCase_()-> str: _SCREAMING_SNAKE_CASE : Optional[int] = sp.make_sepia(__SCREAMING_SNAKE_CASE , 20 ) assert sepia.all() def lowerCamelCase_(__SCREAMING_SNAKE_CASE = "digital_image_processing/image_data/lena_small.jpg" )-> str: _SCREAMING_SNAKE_CASE : Any = bs.Burkes(imread(__SCREAMING_SNAKE_CASE , 1 ) , 120 ) burkes.process() assert burkes.output_img.any() def lowerCamelCase_(__SCREAMING_SNAKE_CASE = "digital_image_processing/image_data/lena_small.jpg" , )-> str: _SCREAMING_SNAKE_CASE : Union[str, Any] = rs.NearestNeighbour(imread(__SCREAMING_SNAKE_CASE , 1 ) , 400 , 200 ) nn.process() assert nn.output.any() def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Union[str, Any] = """digital_image_processing/image_data/lena.jpg""" # Reading the image and converting it to grayscale. 
_SCREAMING_SNAKE_CASE : Tuple = imread(__SCREAMING_SNAKE_CASE , 0 ) # Test for get_neighbors_pixel function() return not None _SCREAMING_SNAKE_CASE : Optional[Any] = 0 _SCREAMING_SNAKE_CASE : Tuple = 0 _SCREAMING_SNAKE_CASE : Optional[int] = image[x_coordinate][y_coordinate] _SCREAMING_SNAKE_CASE : int = lbp.get_neighbors_pixel( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert neighbors_pixels is not None # Test for local_binary_pattern function() # Create a numpy array as the same height and width of read image _SCREAMING_SNAKE_CASE : Any = np.zeros((image.shape[0], image.shape[1]) ) # Iterating through the image and calculating the local binary pattern value # for each pixel. for i in range(0 , image.shape[0] ): for j in range(0 , image.shape[1] ): _SCREAMING_SNAKE_CASE : Optional[Any] = lbp.local_binary_value(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert lbp_image.any()
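A self-contained sketch of the 8-neighbour local binary pattern computation the last test exercises; `lbp_value` below is an illustrative re-statement, not the repo's `local_binary_value`.

import numpy as np

def lbp_value(image: np.ndarray, x: int, y: int) -> int:
    # Compare the 8 neighbours of (x, y) with the centre pixel, clockwise,
    # and pack the comparison bits into one byte in [0, 255].
    center = image[x, y]
    offsets = [(-1, -1), (-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1), (0, -1)]
    value = 0
    for bit, (dx, dy) in enumerate(offsets):
        nx, ny = x + dx, y + dy
        if 0 <= nx < image.shape[0] and 0 <= ny < image.shape[1] and image[nx, ny] >= center:
            value |= 1 << bit
    return value

img = np.arange(25, dtype=np.uint8).reshape(5, 5)
print(lbp_value(img, 2, 2))  # one LBP code for the centre pixel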
708
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class _snake_case ( __snake_case ): """simple docstring""" a = "facebook/bart-large-mnli" a = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) a = "text_classifier" a = AutoTokenizer a = AutoModelForSequenceClassification a = ["text", ["text"]] a = ["text"] def _lowerCAmelCase ( self : int): """simple docstring""" super().setup() _SCREAMING_SNAKE_CASE : Any = self.model.config _SCREAMING_SNAKE_CASE : Any = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("""entail"""): _SCREAMING_SNAKE_CASE : List[Any] = int(_A) if self.entailment_id == -1: raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""") def _lowerCAmelCase ( self : Optional[Any] , _A : Tuple , _A : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = labels return self.pre_processor( [text] * len(_A) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , ) def _lowerCAmelCase ( self : Tuple , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = outputs.logits _SCREAMING_SNAKE_CASE : List[Any] = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
635
0
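A hedged usage sketch for the zero-shot text classification tool defined above, assuming the `PipelineTool` base class wires `setup`/`encode`/`forward`/`decode` together behind `__call__`, as in `transformers.tools`.

from transformers.tools import TextClassificationTool  # assumed import path

tool = TextClassificationTool()
label = tool("This is a super nice API!", labels=["positive", "negative"])
print(label)  # the most likely of the provided labels, e.g. "positive"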
"""simple docstring""" from __future__ import annotations import inspect import unittest import numpy as np from transformers import DeiTConfig from transformers.testing_utils import require_tf, require_vision, slow from transformers.utils import cached_property, is_tf_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_tf_available(): import tensorflow as tf from transformers import ( TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, TFDeiTModel, ) from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import DeiTImageProcessor class _snake_case : """simple docstring""" def __init__( self : Tuple , _A : Optional[Any] , _A : int=1_3 , _A : Any=3_0 , _A : Optional[Any]=2 , _A : int=3 , _A : List[str]=True , _A : Optional[int]=True , _A : List[str]=3_2 , _A : Optional[Any]=2 , _A : List[str]=4 , _A : int=3_7 , _A : Any="gelu" , _A : List[Any]=0.1 , _A : int=0.1 , _A : List[Any]=1_0 , _A : str=0.02 , _A : Union[str, Any]=3 , _A : Any=None , _A : List[str]=2 , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = parent _SCREAMING_SNAKE_CASE : Optional[int] = batch_size _SCREAMING_SNAKE_CASE : Tuple = image_size _SCREAMING_SNAKE_CASE : Union[str, Any] = patch_size _SCREAMING_SNAKE_CASE : List[Any] = num_channels _SCREAMING_SNAKE_CASE : Any = is_training _SCREAMING_SNAKE_CASE : Any = use_labels _SCREAMING_SNAKE_CASE : int = hidden_size _SCREAMING_SNAKE_CASE : List[str] = num_hidden_layers _SCREAMING_SNAKE_CASE : Any = num_attention_heads _SCREAMING_SNAKE_CASE : int = intermediate_size _SCREAMING_SNAKE_CASE : Tuple = hidden_act _SCREAMING_SNAKE_CASE : Tuple = hidden_dropout_prob _SCREAMING_SNAKE_CASE : List[str] = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : Any = type_sequence_label_size _SCREAMING_SNAKE_CASE : Dict = initializer_range _SCREAMING_SNAKE_CASE : Optional[int] = scope _SCREAMING_SNAKE_CASE : List[str] = encoder_stride # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens) _SCREAMING_SNAKE_CASE : Any = (image_size // patch_size) ** 2 _SCREAMING_SNAKE_CASE : List[str] = num_patches + 2 def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _SCREAMING_SNAKE_CASE : Dict = None if self.use_labels: _SCREAMING_SNAKE_CASE : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size) _SCREAMING_SNAKE_CASE : str = self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self : Tuple): """simple docstring""" return DeiTConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_A , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , ) def _lowerCAmelCase ( self : Tuple , _A : Any , _A : Tuple , _A : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] 
= TFDeiTModel(config=_A) _SCREAMING_SNAKE_CASE : Tuple = model(_A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _lowerCAmelCase ( self : List[Any] , _A : Optional[Any] , _A : Union[str, Any] , _A : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = TFDeiTForMaskedImageModeling(config=_A) _SCREAMING_SNAKE_CASE : Dict = model(_A) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 _SCREAMING_SNAKE_CASE : str = TFDeiTForMaskedImageModeling(_A) _SCREAMING_SNAKE_CASE : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) _SCREAMING_SNAKE_CASE : int = model(_A) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size)) def _lowerCAmelCase ( self : Optional[int] , _A : Optional[int] , _A : Union[str, Any] , _A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : int = self.type_sequence_label_size _SCREAMING_SNAKE_CASE : Any = TFDeiTForImageClassification(_A) _SCREAMING_SNAKE_CASE : str = model(_A , labels=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images _SCREAMING_SNAKE_CASE : Dict = 1 _SCREAMING_SNAKE_CASE : str = TFDeiTForImageClassification(_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) _SCREAMING_SNAKE_CASE : Optional[Any] = model(_A , labels=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : int = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE : Dict = config_and_inputs _SCREAMING_SNAKE_CASE : Optional[Any] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_tf class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): """simple docstring""" a = ( ( TFDeiTModel, TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher, TFDeiTForMaskedImageModeling, ) if is_tf_available() else () ) a = ( { "feature-extraction": TFDeiTModel, "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher), } if is_tf_available() else {} ) a = False a = False a = False a = False def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = TFDeiTModelTester(self) _SCREAMING_SNAKE_CASE : int = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7) def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""DeiT does not use inputs_embeds""") def _lowerCAmelCase ( self : int): """simple docstring""" pass def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : str = model_class(_A) self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer)) _SCREAMING_SNAKE_CASE : List[str] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , tf.keras.layers.Dense)) def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes: _SCREAMING_SNAKE_CASE : Any = model_class(_A) _SCREAMING_SNAKE_CASE : Any = inspect.signature(model.call) # signature.parameters is an OrderedDict => so arg_names order is deterministic _SCREAMING_SNAKE_CASE : Optional[Any] = [*signature.parameters.keys()] _SCREAMING_SNAKE_CASE : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _A) def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : int = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_A) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A) def _lowerCAmelCase ( self : Optional[int] , _A : List[str] , _A : Union[str, Any] , _A : Union[str, Any]=False): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = super()._prepare_for_class(_A , _A , return_labels=_A) if return_labels: if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters: del inputs_dict["labels"] return inputs_dict @slow def _lowerCAmelCase ( self : Any): """simple docstring""" for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE : Optional[Any] = TFDeiTModel.from_pretrained(_A) self.assertIsNotNone(_A) def lowerCamelCase_()-> str: _SCREAMING_SNAKE_CASE : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_tf @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def _lowerCAmelCase ( self : str): """simple docstring""" return ( DeiTImageProcessor.from_pretrained("""facebook/deit-base-distilled-patch16-224""") if is_vision_available() else None ) @slow def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = TFDeiTForImageClassificationWithTeacher.from_pretrained("""facebook/deit-base-distilled-patch16-224""") _SCREAMING_SNAKE_CASE : Dict = self.default_image_processor _SCREAMING_SNAKE_CASE : Any = prepare_img() _SCREAMING_SNAKE_CASE : List[str] = image_processor(images=_A , return_tensors="""tf""") # forward pass _SCREAMING_SNAKE_CASE : List[Any] = model(**_A) # verify the logits _SCREAMING_SNAKE_CASE : Optional[Any] = tf.TensorShape((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = tf.constant([-1.0_266, 0.1_912, -1.2_861]) self.assertTrue(np.allclose(outputs.logits[0, :3] , _A , atol=1e-4))
709
"""simple docstring""" import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") _SCREAMING_SNAKE_CASE : str = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") model.to(_A) from datasets import load_dataset _SCREAMING_SNAKE_CASE : Any = load_dataset("""nielsr/rvlcdip-demo""") _SCREAMING_SNAKE_CASE : Any = dataset["""train"""][0]["""image"""].convert("""RGB""") _SCREAMING_SNAKE_CASE : str = image_processor(_A , return_tensors="""pt""").to(_A) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE : Any = model(**_A) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 1_6)) self.assertEqual(logits.shape , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [-0.4_158, -0.4_092, -0.4_347] , device=_A , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , _A , atol=1e-4))
635
0
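The same checkpoint can also be exercised through the high-level pipeline API; the document path and predicted label below are placeholders.

from transformers import pipeline

classifier = pipeline("image-classification", model="microsoft/dit-base-finetuned-rvlcdip")
prediction = classifier("path/to/scanned_document.png")[0]  # placeholder path
print(prediction)  # e.g. {"label": "invoice", "score": ...} for an RVL-CDIP class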
"""simple docstring""" import argparse import json import pickle from pathlib import Path import requests import torch from huggingface_hub import hf_hub_download from PIL import Image from transformers import MaskFormerConfig, MaskFormerForInstanceSegmentation, MaskFormerImageProcessor, SwinConfig from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : int = SwinConfig.from_pretrained( """microsoft/swin-tiny-patch4-window7-224""" , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] ) _SCREAMING_SNAKE_CASE : List[Any] = MaskFormerConfig(backbone_config=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = """huggingface/label-files""" if "ade20k-full" in model_name: # this should be ok _SCREAMING_SNAKE_CASE : List[Any] = 847 _SCREAMING_SNAKE_CASE : str = """maskformer-ade20k-full-id2label.json""" elif "ade" in model_name: # this should be ok _SCREAMING_SNAKE_CASE : Union[str, Any] = 150 _SCREAMING_SNAKE_CASE : Optional[int] = """ade20k-id2label.json""" elif "coco-stuff" in model_name: # this should be ok _SCREAMING_SNAKE_CASE : Dict = 171 _SCREAMING_SNAKE_CASE : int = """maskformer-coco-stuff-id2label.json""" elif "coco" in model_name: # TODO _SCREAMING_SNAKE_CASE : str = 133 _SCREAMING_SNAKE_CASE : List[str] = """coco-panoptic-id2label.json""" elif "cityscapes" in model_name: # this should be ok _SCREAMING_SNAKE_CASE : int = 19 _SCREAMING_SNAKE_CASE : Union[str, Any] = """cityscapes-id2label.json""" elif "vistas" in model_name: # this should be ok _SCREAMING_SNAKE_CASE : Optional[int] = 65 _SCREAMING_SNAKE_CASE : Tuple = """mapillary-vistas-id2label.json""" _SCREAMING_SNAKE_CASE : Union[str, Any] = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type="""dataset""" ) , """r""" ) ) _SCREAMING_SNAKE_CASE : List[str] = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()} return config def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : str = [] # stem # fmt: off rename_keys.append(("""backbone.patch_embed.proj.weight""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.weight""") ) rename_keys.append(("""backbone.patch_embed.proj.bias""", """model.pixel_level_module.encoder.model.embeddings.patch_embeddings.projection.bias""") ) rename_keys.append(("""backbone.patch_embed.norm.weight""", """model.pixel_level_module.encoder.model.embeddings.norm.weight""") ) rename_keys.append(("""backbone.patch_embed.norm.bias""", """model.pixel_level_module.encoder.model.embeddings.norm.bias""") ) # stages for i in range(len(config.backbone_config.depths ) ): for j in range(config.backbone_config.depths[i] ): rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_bias_table""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.relative_position_index""", 
F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.attn.proj.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.norm2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc1.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.weight""") ) rename_keys.append((F"""backbone.layers.{i}.blocks.{j}.mlp.fc2.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.blocks.{j}.output.dense.bias""") ) if i < 3: rename_keys.append((F"""backbone.layers.{i}.downsample.reduction.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.reduction.weight""") ) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.weight""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.weight""") ) rename_keys.append((F"""backbone.layers.{i}.downsample.norm.bias""", F"""model.pixel_level_module.encoder.model.encoder.layers.{i}.downsample.norm.bias""") ) rename_keys.append((F"""backbone.norm{i}.weight""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.weight""") ) rename_keys.append((F"""backbone.norm{i}.bias""", F"""model.pixel_level_module.encoder.hidden_states_norms.{i}.bias""") ) # FPN rename_keys.append(("""sem_seg_head.layer_4.weight""", """model.pixel_level_module.decoder.fpn.stem.0.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.weight""", """model.pixel_level_module.decoder.fpn.stem.1.weight""") ) rename_keys.append(("""sem_seg_head.layer_4.norm.bias""", """model.pixel_level_module.decoder.fpn.stem.1.bias""") ) for source_index, target_index in zip(range(3 , 0 , -1 ) , range(0 , 3 ) ): rename_keys.append((F"""sem_seg_head.adapter_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.0.weight""") ) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.weight""") ) rename_keys.append((F"""sem_seg_head.adapter_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.proj.1.bias""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.0.weight""") ) rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.weight""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.weight""") ) 
rename_keys.append((F"""sem_seg_head.layer_{source_index}.norm.bias""", F"""model.pixel_level_module.decoder.fpn.layers.{target_index}.block.1.bias""") ) rename_keys.append(("""sem_seg_head.mask_features.weight""", """model.pixel_level_module.decoder.mask_projection.weight""") ) rename_keys.append(("""sem_seg_head.mask_features.bias""", """model.pixel_level_module.decoder.mask_projection.bias""") ) # Transformer decoder for idx in range(config.decoder_config.decoder_layers ): # self-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn.out_proj.bias""") ) # cross-attention out projection rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.out_proj.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn.out_proj.bias""") ) # MLP 1 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc1.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear1.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc1.bias""") ) # MLP 2 rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.weight""", F"""model.transformer_module.decoder.layers.{idx}.fc2.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.linear2.bias""", F"""model.transformer_module.decoder.layers.{idx}.fc2.bias""") ) # layernorm 1 (self-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.weight""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm1.bias""", F"""model.transformer_module.decoder.layers.{idx}.self_attn_layer_norm.bias""") ) # layernorm 2 (cross-attention layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.weight""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm2.bias""", F"""model.transformer_module.decoder.layers.{idx}.encoder_attn_layer_norm.bias""") ) # layernorm 3 (final layernorm) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.weight""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.norm3.bias""", F"""model.transformer_module.decoder.layers.{idx}.final_layer_norm.bias""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.weight""", """model.transformer_module.decoder.layernorm.weight""") ) rename_keys.append(("""sem_seg_head.predictor.transformer.decoder.norm.bias""", """model.transformer_module.decoder.layernorm.bias""") ) # heads on top rename_keys.append(("""sem_seg_head.predictor.query_embed.weight""", 
"""model.transformer_module.queries_embedder.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.weight""", """model.transformer_module.input_projection.weight""") ) rename_keys.append(("""sem_seg_head.predictor.input_proj.bias""", """model.transformer_module.input_projection.bias""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.weight""", """class_predictor.weight""") ) rename_keys.append(("""sem_seg_head.predictor.class_embed.bias""", """class_predictor.bias""") ) for i in range(3 ): rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.weight""", F"""mask_embedder.{i}.0.weight""") ) rename_keys.append((F"""sem_seg_head.predictor.mask_embed.layers.{i}.bias""", F"""mask_embedder.{i}.0.bias""") ) # fmt: on return rename_keys def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : str = dct.pop(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = val def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = [int(backbone_config.embed_dim * 2**i ) for i in range(len(backbone_config.depths ) )] for i in range(len(backbone_config.depths ) ): _SCREAMING_SNAKE_CASE : Optional[Any] = num_features[i] for j in range(backbone_config.depths[i] ): # fmt: off # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias) _SCREAMING_SNAKE_CASE : List[str] = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.weight""" ) _SCREAMING_SNAKE_CASE : int = state_dict.pop(F"""backbone.layers.{i}.blocks.{j}.attn.qkv.bias""" ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE : List[str] = in_proj_weight[:dim, :] _SCREAMING_SNAKE_CASE : Tuple = in_proj_bias[: dim] _SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight[ dim : dim * 2, : ] _SCREAMING_SNAKE_CASE : Any = in_proj_bias[ dim : dim * 2 ] _SCREAMING_SNAKE_CASE : int = in_proj_weight[ -dim :, : ] _SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[-dim :] # fmt: on def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: # fmt: off _SCREAMING_SNAKE_CASE : List[str] = config.decoder_config.hidden_size for idx in range(config.decoder_config.decoder_layers ): # read in weights + bias of self-attention input projection layer (in the original implementation, this is a single matrix + bias) _SCREAMING_SNAKE_CASE : Any = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_weight""" ) _SCREAMING_SNAKE_CASE : Optional[int] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.self_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight[: hidden_size, :] _SCREAMING_SNAKE_CASE : str = in_proj_bias[:config.hidden_size] _SCREAMING_SNAKE_CASE : List[Any] = in_proj_weight[hidden_size : hidden_size * 2, :] _SCREAMING_SNAKE_CASE : List[str] = in_proj_bias[hidden_size : hidden_size * 2] _SCREAMING_SNAKE_CASE : str = in_proj_weight[-hidden_size :, :] _SCREAMING_SNAKE_CASE : Optional[int] = in_proj_bias[-hidden_size :] # read in weights + bias of cross-attention input projection layer (in the original implementation, this is a single matrix + bias) _SCREAMING_SNAKE_CASE : List[Any] = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_weight""" ) 
_SCREAMING_SNAKE_CASE : str = state_dict.pop(F"""sem_seg_head.predictor.transformer.decoder.layers.{idx}.multihead_attn.in_proj_bias""" ) # next, add query, keys and values (in that order) to the state dict _SCREAMING_SNAKE_CASE : Tuple = in_proj_weight[: hidden_size, :] _SCREAMING_SNAKE_CASE : Dict = in_proj_bias[:config.hidden_size] _SCREAMING_SNAKE_CASE : Optional[Any] = in_proj_weight[hidden_size : hidden_size * 2, :] _SCREAMING_SNAKE_CASE : Any = in_proj_bias[hidden_size : hidden_size * 2] _SCREAMING_SNAKE_CASE : Optional[int] = in_proj_weight[-hidden_size :, :] _SCREAMING_SNAKE_CASE : Any = in_proj_bias[-hidden_size :] # fmt: on def lowerCamelCase_()-> torch.Tensor: _SCREAMING_SNAKE_CASE : Union[str, Any] = """http://images.cocodataset.org/val2017/000000039769.jpg""" _SCREAMING_SNAKE_CASE : List[str] = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ) return im @torch.no_grad() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False )-> int: _SCREAMING_SNAKE_CASE : List[Any] = get_maskformer_config(__SCREAMING_SNAKE_CASE ) # load original state_dict with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = pickle.load(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = data["""model"""] # for name, param in state_dict.items(): # print(name, param.shape) # rename keys _SCREAMING_SNAKE_CASE : Tuple = create_rename_keys(__SCREAMING_SNAKE_CASE ) for src, dest in rename_keys: rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) read_in_swin_q_k_v(__SCREAMING_SNAKE_CASE , config.backbone_config ) read_in_decoder_q_k_v(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # update to torch tensors for key, value in state_dict.items(): _SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(__SCREAMING_SNAKE_CASE ) # load 🤗 model _SCREAMING_SNAKE_CASE : Tuple = MaskFormerForInstanceSegmentation(__SCREAMING_SNAKE_CASE ) model.eval() for name, param in model.named_parameters(): print(__SCREAMING_SNAKE_CASE , param.shape ) _SCREAMING_SNAKE_CASE : str = model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) assert missing_keys == [ "model.pixel_level_module.encoder.model.layernorm.weight", "model.pixel_level_module.encoder.model.layernorm.bias", ] assert len(__SCREAMING_SNAKE_CASE ) == 0, F"""Unexpected keys: {unexpected_keys}""" # verify results _SCREAMING_SNAKE_CASE : str = prepare_img() if "vistas" in model_name: _SCREAMING_SNAKE_CASE : List[str] = 65 elif "cityscapes" in model_name: _SCREAMING_SNAKE_CASE : Dict = 65_535 else: _SCREAMING_SNAKE_CASE : Tuple = 255 _SCREAMING_SNAKE_CASE : str = True if """ade""" in model_name else False _SCREAMING_SNAKE_CASE : int = MaskFormerImageProcessor(ignore_index=__SCREAMING_SNAKE_CASE , reduce_labels=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ) print("""Logits:""" , outputs.class_queries_logits[0, :3, :3] ) if model_name == "maskformer-swin-tiny-ade": _SCREAMING_SNAKE_CASE : int = torch.tensor( [[3.63_53, -4.47_70, -2.60_65], [0.50_81, -4.23_94, -3.53_43], [2.19_09, -5.03_53, -1.93_23]] ) assert torch.allclose(outputs.class_queries_logits[0, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1e-4 ) print("""Looks ok!""" ) if pytorch_dump_folder_path is not None: print(F"""Saving model and image 
processor to {pytorch_dump_folder_path}""" ) Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) image_processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if push_to_hub: print("""Pushing model and image processor to the hub...""" ) model.push_to_hub(F"""nielsr/{model_name}""" ) image_processor.push_to_hub(F"""nielsr/{model_name}""" ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--model_name''', default='''maskformer-swin-tiny-ade''', type=str, help=('''Name of the MaskFormer model you\'d like to convert''',), ) parser.add_argument( '''--checkpoint_path''', default='''/Users/nielsrogge/Documents/MaskFormer_checkpoints/MaskFormer-Swin-tiny-ADE20k/model.pkl''', type=str, help='''Path to the original state dict (.pth file).''', ) parser.add_argument( '''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.''' ) parser.add_argument( '''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.''' ) lowerCAmelCase_ = parser.parse_args() convert_maskformer_checkpoint( args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub )
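A hypothetical programmatic invocation of the conversion entry point above; the keyword names are inferred from the argparse flags and the paths are placeholders.

convert_maskformer_checkpoint(
    model_name="maskformer-swin-tiny-ade",
    checkpoint_path="/path/to/MaskFormer-Swin-tiny-ADE20k/model.pkl",  # placeholder
    pytorch_dump_folder_path="./maskformer-swin-tiny-ade",  # placeholder
    push_to_hub=False,
)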
710
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class _snake_case ( __snake_case ): """simple docstring""" a = "M-CLIP" def __init__( self : Optional[Any] , _A : List[str]=1_0_2_4 , _A : Union[str, Any]=7_6_8 , **_A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = transformerDimSize _SCREAMING_SNAKE_CASE : List[str] = imageDimSize super().__init__(**_A) class _snake_case ( __snake_case ): """simple docstring""" a = MCLIPConfig def __init__( self : Dict , _A : Optional[Any] , *_A : Any , **_A : Dict): """simple docstring""" super().__init__(_A , *_A , **_A) _SCREAMING_SNAKE_CASE : Tuple = XLMRobertaModel(_A) _SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims) def _lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.transformer(input_ids=_A , attention_mask=_A)[0] _SCREAMING_SNAKE_CASE : Optional[Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None] return self.LinearTransformation(_A), embs
635
0
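A hedged usage sketch for the multilingual text encoder above, assuming the wrapper class is named MultilingualCLIP as in the upstream M-CLIP repository; the checkpoint name is an assumption for illustration.

from transformers import AutoTokenizer

checkpoint = "M-CLIP/XLM-Roberta-Large-Vit-B-32"  # assumed checkpoint name
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = MultilingualCLIP.from_pretrained(checkpoint)

batch = tokenizer(["en katt", "a cat"], padding=True, return_tensors="pt")
projected, pooled = model(batch["input_ids"], batch["attention_mask"])
print(projected.shape)  # (2, numDims): one CLIP-space embedding per input text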
import logging import os import sys import warnings from dataclasses import dataclass, field from random import randint from typing import Optional import datasets import evaluate import numpy as np from datasets import DatasetDict, load_dataset import transformers from transformers import ( AutoConfig, AutoFeatureExtractor, AutoModelForAudioClassification, HfArgumentParser, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint from transformers.utils import check_min_version, send_example_telemetry from transformers.utils.versions import require_version lowerCAmelCase_ = logging.getLogger(__name__) # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version('''4.31.0''') require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''') def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16_000 )-> List[Any]: _SCREAMING_SNAKE_CASE : Tuple = int(round(sample_rate * max_length ) ) if len(__SCREAMING_SNAKE_CASE ) <= sample_length: return wav _SCREAMING_SNAKE_CASE : int = randint(0 , len(__SCREAMING_SNAKE_CASE ) - sample_length - 1 ) return wav[random_offset : random_offset + sample_length] @dataclass class _snake_case : """simple docstring""" a = field(default=__snake_case , metadata={"help": "Name of a dataset from the datasets package"} ) a = field( default=__snake_case , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) a = field( default=__snake_case , metadata={"help": "A file containing the training audio paths and labels."} ) a = field( default=__snake_case , metadata={"help": "A file containing the validation audio paths and labels."} ) a = field( default="train" , metadata={ "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'" } , ) a = field( default="validation" , metadata={ "help": ( "The name of the training data set split to use (via the datasets library). Defaults to 'validation'" ) } , ) a = field( default="audio" , metadata={"help": "The name of the dataset column containing the audio data. Defaults to 'audio'"} , ) a = field( default="label" , metadata={"help": "The name of the dataset column containing the labels. Defaults to 'label'"} ) a = field( default=__snake_case , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." ) } , ) a = field( default=__snake_case , metadata={ "help": ( "For debugging purposes or quicker training, truncate the number of evaluation examples to this " "value if set." 
) } , ) a = field( default=20 , metadata={"help": "Audio clips will be randomly cut to this length during training if the value is set."} , ) @dataclass class _snake_case : """simple docstring""" a = field( default="facebook/wav2vec2-base" , metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} , ) a = field( default=__snake_case , metadata={"help": "Pretrained config name or path if not the same as model_name"} ) a = field( default=__snake_case , metadata={"help": "Where do you want to store the pretrained models downloaded from the Hub"} ) a = field( default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , ) a = field( default=__snake_case , metadata={"help": "Name or path of preprocessor config."} ) a = field( default=__snake_case , metadata={"help": "Whether to freeze the feature encoder layers of the model."} ) a = field( default=__snake_case , metadata={"help": "Whether to generate an attention mask in the feature extractor."} ) a = field( default=__snake_case , metadata={ "help": ( "Will use the token generated when running `huggingface-cli login` (necessary to use this script " "with private models)." ) } , ) a = field( default=__snake_case , metadata={"help": "Whether to freeze the feature extractor layers of the model."} ) a = field( default=__snake_case , metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} , ) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" if not self.freeze_feature_extractor and self.freeze_feature_encoder: warnings.warn( """The argument `--freeze_feature_extractor` is deprecated and """ """will be removed in a future version. Use `--freeze_feature_encoder`""" """instead. Setting `freeze_feature_encoder==True`.""" , _A , ) if self.freeze_feature_extractor and not self.freeze_feature_encoder: raise ValueError( """The argument `--freeze_feature_extractor` is deprecated and """ """should not be used in combination with `--freeze_feature_encoder`.""" """Only make use of `--freeze_feature_encoder`.""") def lowerCamelCase_()-> List[Any]: # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. _SCREAMING_SNAKE_CASE : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) ) if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. _SCREAMING_SNAKE_CASE : Tuple = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) ) else: _SCREAMING_SNAKE_CASE : Tuple = parser.parse_args_into_dataclasses() # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The # information sent is the one passed as arguments along with your Python/PyTorch versions. send_example_telemetry("""run_audio_classification""" , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Setup logging logging.basicConfig( format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , ) if training_args.should_log: # The default of training_args.log_level is passive, so we set log level at info here to have that default. 
transformers.utils.logging.set_verbosity_info() _SCREAMING_SNAKE_CASE : Tuple = training_args.get_process_log_level() logger.setLevel(__SCREAMING_SNAKE_CASE ) transformers.utils.logging.set_verbosity(__SCREAMING_SNAKE_CASE ) transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Log on each process the small summary: logger.warning( F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} """ + F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" ) logger.info(F"""Training/evaluation parameters {training_args}""" ) # Set seed before initializing model. set_seed(training_args.seed ) # Detecting last checkpoint. _SCREAMING_SNAKE_CASE : str = None if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir: _SCREAMING_SNAKE_CASE : int = get_last_checkpoint(training_args.output_dir ) if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0: raise ValueError( F"""Output directory ({training_args.output_dir}) already exists and is not empty. """ """Use --overwrite_output_dir to train from scratch.""" ) elif last_checkpoint is not None and training_args.resume_from_checkpoint is None: logger.info( F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """ """the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" ) # Initialize our dataset and prepare it for the audio classification task. _SCREAMING_SNAKE_CASE : Any = DatasetDict() _SCREAMING_SNAKE_CASE : str = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , ) _SCREAMING_SNAKE_CASE : List[str] = load_dataset( data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , ) if data_args.audio_column_name not in raw_datasets["train"].column_names: raise ValueError( F"""--audio_column_name {data_args.audio_column_name} not found in dataset '{data_args.dataset_name}'. """ """Make sure to set `--audio_column_name` to the correct audio column - one of """ F"""{", ".join(raw_datasets["train"].column_names )}.""" ) if data_args.label_column_name not in raw_datasets["train"].column_names: raise ValueError( F"""--label_column_name {data_args.label_column_name} not found in dataset '{data_args.dataset_name}'. """ """Make sure to set `--label_column_name` to the correct text column - one of """ F"""{", ".join(raw_datasets["train"].column_names )}.""" ) # Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over # transformer outputs in the classifier, but it doesn't always lead to better accuracy _SCREAMING_SNAKE_CASE : Optional[int] = AutoFeatureExtractor.from_pretrained( model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) # `datasets` takes care of automatically loading and resampling the audio, # so we just need to set the correct target sampling rate. 
_SCREAMING_SNAKE_CASE : int = raw_datasets.cast_column( data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = feature_extractor.model_input_names[0] def train_transforms(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = [] for audio in batch[data_args.audio_column_name]: _SCREAMING_SNAKE_CASE : Dict = random_subsample( audio["""array"""] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate ) subsampled_wavs.append(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = feature_extractor(__SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _SCREAMING_SNAKE_CASE : Tuple = {model_input_name: inputs.get(__SCREAMING_SNAKE_CASE )} _SCREAMING_SNAKE_CASE : List[Any] = list(batch[data_args.label_column_name] ) return output_batch def val_transforms(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Dict = [audio["""array"""] for audio in batch[data_args.audio_column_name]] _SCREAMING_SNAKE_CASE : List[str] = feature_extractor(__SCREAMING_SNAKE_CASE , sampling_rate=feature_extractor.sampling_rate ) _SCREAMING_SNAKE_CASE : Optional[Any] = {model_input_name: inputs.get(__SCREAMING_SNAKE_CASE )} _SCREAMING_SNAKE_CASE : List[str] = list(batch[data_args.label_column_name] ) return output_batch # Prepare label mappings. # We'll include these in the model's config to get human readable labels in the Inference API. _SCREAMING_SNAKE_CASE : Optional[int] = raw_datasets["""train"""].features[data_args.label_column_name].names _SCREAMING_SNAKE_CASE : List[str] = {}, {} for i, label in enumerate(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : int = str(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = label # Load the accuracy metric from the datasets package _SCREAMING_SNAKE_CASE : Tuple = evaluate.load("""accuracy""" ) # Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with # `predictions` and `label_ids` fields) and has to return a dictionary string to float. 
def compute_metrics(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = np.argmax(eval_pred.predictions , axis=1 ) return metric.compute(predictions=__SCREAMING_SNAKE_CASE , references=eval_pred.label_ids ) _SCREAMING_SNAKE_CASE : int = AutoConfig.from_pretrained( model_args.config_name or model_args.model_name_or_path , num_labels=len(__SCREAMING_SNAKE_CASE ) , labelaid=__SCREAMING_SNAKE_CASE , idalabel=__SCREAMING_SNAKE_CASE , finetuning_task="""audio-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ) _SCREAMING_SNAKE_CASE : Optional[Any] = AutoModelForAudioClassification.from_pretrained( model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , ) # freeze the convolutional waveform encoder if model_args.freeze_feature_encoder: model.freeze_feature_encoder() if training_args.do_train: if data_args.max_train_samples is not None: _SCREAMING_SNAKE_CASE : Dict = ( raw_datasets["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) ) ) # Set the training transforms raw_datasets["train"].set_transform(__SCREAMING_SNAKE_CASE , output_all_columns=__SCREAMING_SNAKE_CASE ) if training_args.do_eval: if data_args.max_eval_samples is not None: _SCREAMING_SNAKE_CASE : int = ( raw_datasets["""eval"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) ) ) # Set the validation transforms raw_datasets["eval"].set_transform(__SCREAMING_SNAKE_CASE , output_all_columns=__SCREAMING_SNAKE_CASE ) # Initialize our trainer _SCREAMING_SNAKE_CASE : Union[str, Any] = Trainer( model=__SCREAMING_SNAKE_CASE , args=__SCREAMING_SNAKE_CASE , train_dataset=raw_datasets["""train"""] if training_args.do_train else None , eval_dataset=raw_datasets["""eval"""] if training_args.do_eval else None , compute_metrics=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE , ) # Training if training_args.do_train: _SCREAMING_SNAKE_CASE : List[str] = None if training_args.resume_from_checkpoint is not None: _SCREAMING_SNAKE_CASE : List[str] = training_args.resume_from_checkpoint elif last_checkpoint is not None: _SCREAMING_SNAKE_CASE : List[str] = last_checkpoint _SCREAMING_SNAKE_CASE : Any = trainer.train(resume_from_checkpoint=__SCREAMING_SNAKE_CASE ) trainer.save_model() trainer.log_metrics("""train""" , train_result.metrics ) trainer.save_metrics("""train""" , train_result.metrics ) trainer.save_state() # Evaluation if training_args.do_eval: _SCREAMING_SNAKE_CASE : Tuple = trainer.evaluate() trainer.log_metrics("""eval""" , __SCREAMING_SNAKE_CASE ) trainer.save_metrics("""eval""" , __SCREAMING_SNAKE_CASE ) # Write model card and (optionally) push to hub _SCREAMING_SNAKE_CASE : Union[str, Any] = { """finetuned_from""": model_args.model_name_or_path, """tasks""": """audio-classification""", """dataset""": data_args.dataset_name, """tags""": ["""audio-classification"""], } if training_args.push_to_hub: trainer.push_to_hub(**__SCREAMING_SNAKE_CASE ) else: trainer.create_model_card(**__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
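The random cropping used by `train_transforms` can be illustrated in isolation; the helper below re-states its logic under assumed names and is not the script's own definition.

import numpy as np
from random import randint

def random_subsample(wav: np.ndarray, max_length: float, sample_rate: int = 16_000) -> np.ndarray:
    # Randomly crop a waveform to at most `max_length` seconds.
    sample_length = int(round(sample_rate * max_length))
    if len(wav) <= sample_length:
        return wav
    random_offset = randint(0, len(wav) - sample_length - 1)
    return wav[random_offset : random_offset + sample_length]

wav = np.random.randn(16_000 * 30)           # 30 s of fake audio
crop = random_subsample(wav, max_length=20)  # 20 s window, the script's default
print(len(crop) / 16_000)                    # -> 20.0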
711
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) _SCREAMING_SNAKE_CASE : int = precision _SCREAMING_SNAKE_CASE : Dict = ceil(precision / 14 ) _SCREAMING_SNAKE_CASE : int = 426_880 * Decimal(10_005 ).sqrt() _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 _SCREAMING_SNAKE_CASE : str = 13_591_409 _SCREAMING_SNAKE_CASE : Tuple = Decimal(__SCREAMING_SNAKE_CASE ) for k in range(1 , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = factorial(6 * k ) // (factorial(3 * k ) * factorial(__SCREAMING_SNAKE_CASE ) ** 3) linear_term += 545_140_134 exponential_term *= -262_537_412_640_768_000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": lowerCAmelCase_ = 50 print(F"The first {n} digits of pi is: {pi(n)}")
635
0
"""simple docstring""" from copy import deepcopy from typing import Optional, Union import numpy as np from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding from ...utils import TensorType, is_tf_available, is_torch_available if is_torch_available(): import torch if is_tf_available(): import tensorflow as tf class _snake_case ( __snake_case ): """simple docstring""" a = ["image_processor"] a = "SamImageProcessor" def __init__( self : Dict , _A : Any): """simple docstring""" super().__init__(_A) _SCREAMING_SNAKE_CASE : Tuple = self.image_processor _SCREAMING_SNAKE_CASE : Optional[int] = -1_0 _SCREAMING_SNAKE_CASE : str = self.image_processor.size["""longest_edge"""] def __call__( self : Union[str, Any] , _A : Tuple=None , _A : List[str]=None , _A : Optional[int]=None , _A : Tuple=None , _A : Optional[Union[str, TensorType]] = None , **_A : int , ): """simple docstring""" _SCREAMING_SNAKE_CASE : int = self.image_processor( _A , return_tensors=_A , **_A , ) # pop arguments that are not used in the foward but used nevertheless _SCREAMING_SNAKE_CASE : Optional[int] = encoding_image_processor["""original_sizes"""] if hasattr(_A , """numpy"""): # Checks if Torch or TF tensor _SCREAMING_SNAKE_CASE : Optional[Any] = original_sizes.numpy() _SCREAMING_SNAKE_CASE : Optional[Any] = self._check_and_preprocess_points( input_points=_A , input_labels=_A , input_boxes=_A , ) _SCREAMING_SNAKE_CASE : List[Any] = self._normalize_and_convert( _A , _A , input_points=_A , input_labels=_A , input_boxes=_A , return_tensors=_A , ) return encoding_image_processor def _lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : Tuple , _A : Union[str, Any]=None , _A : Tuple=None , _A : int=None , _A : List[str]="pt" , ): """simple docstring""" if input_points is not None: if len(_A) != len(_A): _SCREAMING_SNAKE_CASE : List[str] = [ self._normalize_coordinates(self.target_size , _A , original_sizes[0]) for point in input_points ] else: _SCREAMING_SNAKE_CASE : List[Any] = [ self._normalize_coordinates(self.target_size , _A , _A) for point, original_size in zip(_A , _A) ] # check that all arrays have the same shape if not all(point.shape == input_points[0].shape for point in input_points): if input_labels is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = self._pad_points_and_labels(_A , _A) _SCREAMING_SNAKE_CASE : List[str] = np.array(_A) if input_labels is not None: _SCREAMING_SNAKE_CASE : Any = np.array(_A) if input_boxes is not None: if len(_A) != len(_A): _SCREAMING_SNAKE_CASE : Optional[int] = [ self._normalize_coordinates(self.target_size , _A , original_sizes[0] , is_bounding_box=_A) for box in input_boxes ] else: _SCREAMING_SNAKE_CASE : List[Any] = [ self._normalize_coordinates(self.target_size , _A , _A , is_bounding_box=_A) for box, original_size in zip(_A , _A) ] _SCREAMING_SNAKE_CASE : Any = np.array(_A) if input_boxes is not None: if return_tensors == "pt": _SCREAMING_SNAKE_CASE : Optional[int] = torch.from_numpy(_A) # boxes batch size of 1 by default _SCREAMING_SNAKE_CASE : Dict = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes elif return_tensors == "tf": _SCREAMING_SNAKE_CASE : List[str] = tf.convert_to_tensor(_A) # boxes batch size of 1 by default _SCREAMING_SNAKE_CASE : int = tf.expand_dims(_A , 1) if len(input_boxes.shape) != 3 else input_boxes encoding_image_processor.update({"""input_boxes""": input_boxes}) if input_points is not None: if return_tensors == "pt": _SCREAMING_SNAKE_CASE : List[str] = torch.from_numpy(_A) # 
point batch size of 1 by default _SCREAMING_SNAKE_CASE : str = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points elif return_tensors == "tf": _SCREAMING_SNAKE_CASE : List[Any] = tf.convert_to_tensor(_A) # point batch size of 1 by default _SCREAMING_SNAKE_CASE : int = tf.expand_dims(_A , 1) if len(input_points.shape) != 4 else input_points encoding_image_processor.update({"""input_points""": input_points}) if input_labels is not None: if return_tensors == "pt": _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(_A) # point batch size of 1 by default _SCREAMING_SNAKE_CASE : Optional[int] = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels elif return_tensors == "tf": _SCREAMING_SNAKE_CASE : Tuple = tf.convert_to_tensor(_A) # point batch size of 1 by default _SCREAMING_SNAKE_CASE : Optional[Any] = tf.expand_dims(_A , 1) if len(input_labels.shape) != 3 else input_labels encoding_image_processor.update({"""input_labels""": input_labels}) return encoding_image_processor def _lowerCAmelCase ( self : List[Any] , _A : Dict , _A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : str = max([point.shape[0] for point in input_points]) _SCREAMING_SNAKE_CASE : Any = [] for i, point in enumerate(_A): if point.shape[0] != expected_nb_points: _SCREAMING_SNAKE_CASE : Optional[int] = np.concatenate( [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value] , axis=0) _SCREAMING_SNAKE_CASE : Optional[Any] = np.append(input_labels[i] , [self.point_pad_value]) processed_input_points.append(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = processed_input_points return input_points, input_labels def _lowerCAmelCase ( self : Optional[int] , _A : int , _A : np.ndarray , _A : Union[str, Any] , _A : Tuple=False): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = original_size _SCREAMING_SNAKE_CASE : Any = self.image_processor._get_preprocess_shape(_A , longest_edge=_A) _SCREAMING_SNAKE_CASE : Dict = deepcopy(_A).astype(_A) if is_bounding_box: _SCREAMING_SNAKE_CASE : Optional[Any] = coords.reshape(-1 , 2 , 2) _SCREAMING_SNAKE_CASE : str = coords[..., 0] * (new_w / old_w) _SCREAMING_SNAKE_CASE : List[Any] = coords[..., 1] * (new_h / old_h) if is_bounding_box: _SCREAMING_SNAKE_CASE : List[str] = coords.reshape(-1 , 4) return coords def _lowerCAmelCase ( self : Union[str, Any] , _A : Dict=None , _A : Dict=None , _A : List[str]=None , ): """simple docstring""" if input_points is not None: if hasattr(_A , """numpy"""): # Checks for TF or Torch tensor _SCREAMING_SNAKE_CASE : Tuple = input_points.numpy().tolist() if not isinstance(_A , _A) or not isinstance(input_points[0] , _A): raise ValueError("""Input points must be a list of list of floating points.""") _SCREAMING_SNAKE_CASE : str = [np.array(_A) for input_point in input_points] else: _SCREAMING_SNAKE_CASE : Optional[Any] = None if input_labels is not None: if hasattr(_A , """numpy"""): _SCREAMING_SNAKE_CASE : Optional[Any] = input_labels.numpy().tolist() if not isinstance(_A , _A) or not isinstance(input_labels[0] , _A): raise ValueError("""Input labels must be a list of list integers.""") _SCREAMING_SNAKE_CASE : Optional[Any] = [np.array(_A) for label in input_labels] else: _SCREAMING_SNAKE_CASE : Optional[Any] = None if input_boxes is not None: if hasattr(_A , """numpy"""): _SCREAMING_SNAKE_CASE : Union[str, Any] = input_boxes.numpy().tolist() if ( not isinstance(_A , _A) or not isinstance(input_boxes[0] , _A) or not isinstance(input_boxes[0][0] , _A) ): raise 
ValueError("""Input boxes must be a list of list of list of floating points.""") _SCREAMING_SNAKE_CASE : Optional[int] = [np.array(_A).astype(np.floataa) for box in input_boxes] else: _SCREAMING_SNAKE_CASE : Any = None return input_points, input_labels, input_boxes @property def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.image_processor.model_input_names return list(dict.fromkeys(_A)) def _lowerCAmelCase ( self : Optional[Any] , *_A : str , **_A : Optional[int]): """simple docstring""" return self.image_processor.post_process_masks(*_A , **_A)
712
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _SCREAMING_SNAKE_CASE : Optional[int] = TapasConfig.from_json_file(__SCREAMING_SNAKE_CASE ) # set absolute/relative position embeddings parameter _SCREAMING_SNAKE_CASE : Dict = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _SCREAMING_SNAKE_CASE : str = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WTQ": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : Optional[int] = 4 _SCREAMING_SNAKE_CASE : Any = True # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 0.66_46_94 _SCREAMING_SNAKE_CASE : str = 0.20_79_51 _SCREAMING_SNAKE_CASE : str = 0.12_11_94 _SCREAMING_SNAKE_CASE : List[Any] = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = 0.0_35_25_13 _SCREAMING_SNAKE_CASE : Optional[Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : int = 4 _SCREAMING_SNAKE_CASE : Tuple = False # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 36.45_19 _SCREAMING_SNAKE_CASE : Union[str, Any] = 0.90_34_21 _SCREAMING_SNAKE_CASE : Optional[Any] = 2_22.0_88 _SCREAMING_SNAKE_CASE : Any = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Optional[int] = True _SCREAMING_SNAKE_CASE : Dict = 0.76_31_41 _SCREAMING_SNAKE_CASE : Union[str, Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "TABFACT": _SCREAMING_SNAKE_CASE : int = TapasForSequenceClassification(config=__SCREAMING_SNAKE_CASE ) elif task == "MLM": _SCREAMING_SNAKE_CASE : int = TapasForMaskedLM(config=__SCREAMING_SNAKE_CASE ) elif task == "INTERMEDIATE_PRETRAINING": _SCREAMING_SNAKE_CASE : int = TapasModel(config=__SCREAMING_SNAKE_CASE ) else: raise ValueError(F"""Task {task} not supported.""" ) print(F"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save pytorch-model (weights and configuration) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Save tokenizer files print(F"""Save tokenizer files to {pytorch_dump_path}""" ) _SCREAMING_SNAKE_CASE : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 ) tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. 
Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. Defaults to False.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
635
0
import argparse import torch from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: # Initialise PyTorch model _SCREAMING_SNAKE_CASE : Union[str, Any] = BertConfig.from_json_file(__SCREAMING_SNAKE_CASE ) print(F"""Building PyTorch model from configuration: {config}""" ) _SCREAMING_SNAKE_CASE : List[Any] = BertForPreTraining(__SCREAMING_SNAKE_CASE ) # Load weights from tf checkpoint load_tf_weights_in_bert(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save pytorch-model print(F"""Save PyTorch model to {pytorch_dump_path}""" ) torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--bert_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained BERT model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
713
"""simple docstring""" from typing import Any import numpy as np def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: return np.array_equal(__SCREAMING_SNAKE_CASE , matrix.conjugate().T ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: _SCREAMING_SNAKE_CASE : Optional[int] = v.conjugate().T _SCREAMING_SNAKE_CASE : Optional[int] = v_star.dot(__SCREAMING_SNAKE_CASE ) assert isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) return (v_star_dot.dot(__SCREAMING_SNAKE_CASE )) / (v_star.dot(__SCREAMING_SNAKE_CASE )) def lowerCamelCase_()-> None: _SCREAMING_SNAKE_CASE : Optional[Any] = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) _SCREAMING_SNAKE_CASE : int = np.array([[1], [2], [3]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" print(rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : int = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" assert rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
635
0
"""simple docstring""" import os import pytest from attr import dataclass lowerCAmelCase_ = '''us-east-1''' # defaults region @dataclass class _snake_case : """simple docstring""" a = 42 a = "arn:aws:iam::558105141721:role/sagemaker_execution_role" a = { "task_name": "mnli", "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "do_train": True, "do_eval": True, "do_predict": True, "output_dir": "/opt/ml/model", "overwrite_output_dir": True, "max_steps": 5_00, "save_steps": 55_00, } a = {**hyperparameters, "max_steps": 10_00} @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" if self.framework == "pytorch": return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}, {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"}, ] else: return [ {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"}, {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"}, {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"}, ] @property def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" return f"""{self.framework}-transfromers-test""" @property def _lowerCAmelCase ( self : List[Any]): """simple docstring""" return f"""./tests/sagemaker/scripts/{self.framework}""" @property def _lowerCAmelCase ( self : Dict): """simple docstring""" if self.framework == "pytorch": return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04" else: return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04" @pytest.fixture(scope="""class""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: _SCREAMING_SNAKE_CASE = SageMakerTestEnvironment(framework=request.cls.framework )
714
"""simple docstring""" from __future__ import annotations def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> tuple: if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative in a semiconductor""" ) elif hole_conc < 0: raise ValueError("""Hole concentration cannot be negative in a semiconductor""" ) elif intrinsic_conc < 0: raise ValueError( """Intrinsic concentration cannot be negative in a semiconductor""" ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
635
0
"""simple docstring""" from typing import List, Optional, Tuple, Union import torch from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _snake_case ( __snake_case ): """simple docstring""" def __init__( self : List[Any] , _A : List[str] , _A : List[str]): """simple docstring""" super().__init__() # make sure scheduler can always be converted to DDIM _SCREAMING_SNAKE_CASE : Union[str, Any] = DDIMScheduler.from_config(scheduler.config) self.register_modules(unet=_A , scheduler=_A) @torch.no_grad() def __call__( self : str , _A : int = 1 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : float = 0.0 , _A : int = 5_0 , _A : Optional[bool] = None , _A : Optional[str] = "pil" , _A : bool = True , ): """simple docstring""" if isinstance(self.unet.config.sample_size , _A): _SCREAMING_SNAKE_CASE : Optional[Any] = ( batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size, ) else: _SCREAMING_SNAKE_CASE : str = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size) if isinstance(_A , _A) and len(_A) != batch_size: raise ValueError( f"""You have passed a list of generators of length {len(_A)}, but requested an effective batch""" f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""") _SCREAMING_SNAKE_CASE : Tuple = randn_tensor(_A , generator=_A , device=self.device , dtype=self.unet.dtype) # set step values self.scheduler.set_timesteps(_A) for t in self.progress_bar(self.scheduler.timesteps): # 1. predict noise model_output _SCREAMING_SNAKE_CASE : Union[str, Any] = self.unet(_A , _A).sample # 2. predict previous mean of image x_t-1 and add variance depending on eta # eta corresponds to η in paper and should be between [0, 1] # do x_t -> x_t-1 _SCREAMING_SNAKE_CASE : str = self.scheduler.step( _A , _A , _A , eta=_A , use_clipped_model_output=_A , generator=_A).prev_sample _SCREAMING_SNAKE_CASE : List[str] = (image / 2 + 0.5).clamp(0 , 1) _SCREAMING_SNAKE_CASE : Dict = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": _SCREAMING_SNAKE_CASE : List[Any] = self.numpy_to_pil(_A) if not return_dict: return (image,) return ImagePipelineOutput(images=_A)
715
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 )-> str: _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = DatasetDict( { """train""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """validation""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """test""": dataset["""validation"""], } ) def tokenize_function(__SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _SCREAMING_SNAKE_CASE : str = datasets.map( __SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _SCREAMING_SNAKE_CASE : Any = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. 
_SCREAMING_SNAKE_CASE : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _SCREAMING_SNAKE_CASE : Optional[Any] = 16 elif accelerator.mixed_precision != "no": _SCREAMING_SNAKE_CASE : Any = 8 else: _SCREAMING_SNAKE_CASE : Optional[int] = None return tokenizer.pad( __SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , ) # Instantiate dataloaders. _SCREAMING_SNAKE_CASE : int = DataLoader( tokenized_datasets["""train"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["""test"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader, test_dataloader def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: # New Code # _SCREAMING_SNAKE_CASE : Union[str, Any] = [] # Download the dataset _SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("""glue""" , """mrpc""" ) # Create our splits _SCREAMING_SNAKE_CASE : Dict = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator _SCREAMING_SNAKE_CASE : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _SCREAMING_SNAKE_CASE : Tuple = config["""lr"""] _SCREAMING_SNAKE_CASE : Tuple = int(config["""num_epochs"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""seed"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""batch_size"""] ) _SCREAMING_SNAKE_CASE : List[str] = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation _SCREAMING_SNAKE_CASE : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _SCREAMING_SNAKE_CASE : List[str] = batch_size // MAX_GPU_BATCH_SIZE _SCREAMING_SNAKE_CASE : List[str] = MAX_GPU_BATCH_SIZE set_seed(__SCREAMING_SNAKE_CASE ) # New Code # # Create our folds: _SCREAMING_SNAKE_CASE : List[str] = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] ) _SCREAMING_SNAKE_CASE : Optional[Any] = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = get_fold_dataloaders( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
_SCREAMING_SNAKE_CASE : Tuple = model.to(accelerator.device ) # Instantiate optimizer _SCREAMING_SNAKE_CASE : int = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE ) # Instantiate scheduler _SCREAMING_SNAKE_CASE : int = get_linear_schedule_with_warmup( optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(__SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = outputs.loss _SCREAMING_SNAKE_CASE : List[Any] = loss / gradient_accumulation_steps accelerator.backward(__SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , ) _SCREAMING_SNAKE_CASE : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , __SCREAMING_SNAKE_CASE ) # New Code # # We also run predictions on the test set at the very end _SCREAMING_SNAKE_CASE : str = [] for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. 
test_predictions.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: _SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) _SCREAMING_SNAKE_CASE : List[str] = torch.stack(__SCREAMING_SNAKE_CASE , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) _SCREAMING_SNAKE_CASE : int = metric.compute(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE ) accelerator.print("""Average test metrics from all folds:""" , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_()-> Optional[Any]: _SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) # New Code # parser.add_argument("""--num_folds""" , type=__SCREAMING_SNAKE_CASE , default=3 , help="""The number of splits to perform across the dataset""" ) _SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() _SCREAMING_SNAKE_CASE : Optional[int] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
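# The ensembling step above averages the raw test-set logits of the K folds
# before taking the argmax, i.e. the final label is
#
#   y_hat = argmax_c (1/K) * sum_{k=1}^{K} z_c^(k),
#
# where z^(k) are the logits produced by the model trained on fold k.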
635
0
"""simple docstring""" from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''microsoft/cvt-13''': '''https://huggingface.co/microsoft/cvt-13/resolve/main/config.json''', # See all Cvt models at https://huggingface.co/models?filter=cvt } class _snake_case ( __snake_case ): """simple docstring""" a = "cvt" def __init__( self : Any , _A : List[Any]=3 , _A : List[Any]=[7, 3, 3] , _A : List[Any]=[4, 2, 2] , _A : Union[str, Any]=[2, 1, 1] , _A : Tuple=[6_4, 1_9_2, 3_8_4] , _A : int=[1, 3, 6] , _A : Tuple=[1, 2, 1_0] , _A : Optional[int]=[4.0, 4.0, 4.0] , _A : Any=[0.0, 0.0, 0.0] , _A : Union[str, Any]=[0.0, 0.0, 0.0] , _A : str=[0.0, 0.0, 0.1] , _A : Any=[True, True, True] , _A : Any=[False, False, True] , _A : Tuple=["dw_bn", "dw_bn", "dw_bn"] , _A : Optional[Any]=[3, 3, 3] , _A : Any=[1, 1, 1] , _A : Tuple=[2, 2, 2] , _A : List[Any]=[1, 1, 1] , _A : Dict=[1, 1, 1] , _A : Dict=0.02 , _A : List[Any]=1e-12 , **_A : List[Any] , ): """simple docstring""" super().__init__(**_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = num_channels _SCREAMING_SNAKE_CASE : List[str] = patch_sizes _SCREAMING_SNAKE_CASE : Any = patch_stride _SCREAMING_SNAKE_CASE : Tuple = patch_padding _SCREAMING_SNAKE_CASE : Tuple = embed_dim _SCREAMING_SNAKE_CASE : int = num_heads _SCREAMING_SNAKE_CASE : Tuple = depth _SCREAMING_SNAKE_CASE : Optional[Any] = mlp_ratio _SCREAMING_SNAKE_CASE : Union[str, Any] = attention_drop_rate _SCREAMING_SNAKE_CASE : Optional[int] = drop_rate _SCREAMING_SNAKE_CASE : Optional[int] = drop_path_rate _SCREAMING_SNAKE_CASE : Any = qkv_bias _SCREAMING_SNAKE_CASE : Dict = cls_token _SCREAMING_SNAKE_CASE : str = qkv_projection_method _SCREAMING_SNAKE_CASE : int = kernel_qkv _SCREAMING_SNAKE_CASE : int = padding_kv _SCREAMING_SNAKE_CASE : List[str] = stride_kv _SCREAMING_SNAKE_CASE : List[Any] = padding_q _SCREAMING_SNAKE_CASE : Dict = stride_q _SCREAMING_SNAKE_CASE : str = initializer_range _SCREAMING_SNAKE_CASE : List[Any] = layer_norm_eps
716
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
0
import inspect import unittest from transformers import ViTMSNConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ViTMSNForImageClassification, ViTMSNModel from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import ViTImageProcessor class _snake_case : """simple docstring""" def __init__( self : Tuple , _A : str , _A : int=1_3 , _A : List[str]=3_0 , _A : Dict=2 , _A : Optional[Any]=3 , _A : Tuple=True , _A : Union[str, Any]=True , _A : Optional[int]=3_2 , _A : int=5 , _A : Union[str, Any]=4 , _A : List[Any]=3_7 , _A : Tuple="gelu" , _A : Optional[int]=0.1 , _A : Union[str, Any]=0.1 , _A : Union[str, Any]=1_0 , _A : int=0.02 , _A : int=None , ): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = parent _SCREAMING_SNAKE_CASE : str = batch_size _SCREAMING_SNAKE_CASE : int = image_size _SCREAMING_SNAKE_CASE : Optional[Any] = patch_size _SCREAMING_SNAKE_CASE : Dict = num_channels _SCREAMING_SNAKE_CASE : int = is_training _SCREAMING_SNAKE_CASE : Union[str, Any] = use_labels _SCREAMING_SNAKE_CASE : str = hidden_size _SCREAMING_SNAKE_CASE : Any = num_hidden_layers _SCREAMING_SNAKE_CASE : Tuple = num_attention_heads _SCREAMING_SNAKE_CASE : Union[str, Any] = intermediate_size _SCREAMING_SNAKE_CASE : Optional[int] = hidden_act _SCREAMING_SNAKE_CASE : int = hidden_dropout_prob _SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size _SCREAMING_SNAKE_CASE : Optional[int] = initializer_range _SCREAMING_SNAKE_CASE : str = scope # in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token) _SCREAMING_SNAKE_CASE : Any = (image_size // patch_size) ** 2 _SCREAMING_SNAKE_CASE : List[str] = num_patches + 1 def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _SCREAMING_SNAKE_CASE : Optional[int] = None if self.use_labels: _SCREAMING_SNAKE_CASE : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size) _SCREAMING_SNAKE_CASE : List[Any] = self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self : str): """simple docstring""" return ViTMSNConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , ) def _lowerCAmelCase ( self : Tuple , _A : List[str] , _A : Any , _A : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = ViTMSNModel(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : List[Any] = model(_A) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size)) def _lowerCAmelCase ( 
self : int , _A : str , _A : Optional[int] , _A : str): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = self.type_sequence_label_size _SCREAMING_SNAKE_CASE : Any = ViTMSNForImageClassification(_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Dict = model(_A , labels=_A) print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""") print("""Labels: {labels}""") self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images _SCREAMING_SNAKE_CASE : int = 1 _SCREAMING_SNAKE_CASE : Any = ViTMSNForImageClassification(_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) _SCREAMING_SNAKE_CASE : List[str] = model(_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE : int = config_and_inputs _SCREAMING_SNAKE_CASE : str = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): """simple docstring""" a = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else () a = ( {"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification} if is_torch_available() else {} ) a = False a = False a = False a = False def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = ViTMSNModelTester(self) _SCREAMING_SNAKE_CASE : Optional[int] = ConfigTester(self , config_class=_A , has_text_modality=_A , hidden_size=3_7) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" self.config_tester.run_common_tests() @unittest.skip(reason="""ViTMSN does not use inputs_embeds""") def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" pass def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : List[str] = model_class(_A) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _SCREAMING_SNAKE_CASE : Dict = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , nn.Linear)) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : Optional[Any] = model_class(_A) _SCREAMING_SNAKE_CASE : List[Any] = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _SCREAMING_SNAKE_CASE : List[str] = [*signature.parameters.keys()] _SCREAMING_SNAKE_CASE : List[Any] = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _A) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A) @slow def _lowerCAmelCase ( self : Dict): """simple docstring""" for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE : List[Any] = 
ViTMSNModel.from_pretrained(_A) self.assertIsNotNone(_A) def lowerCamelCase_()-> int: _SCREAMING_SNAKE_CASE : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" ) return image @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def _lowerCAmelCase ( self : Dict): """simple docstring""" return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""") if is_vision_available() else None @slow def _lowerCAmelCase ( self : List[Any]): """simple docstring""" torch.manual_seed(2) _SCREAMING_SNAKE_CASE : Any = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""").to(_A) _SCREAMING_SNAKE_CASE : Any = self.default_image_processor _SCREAMING_SNAKE_CASE : List[Any] = prepare_img() _SCREAMING_SNAKE_CASE : Optional[Any] = image_processor(images=_A , return_tensors="""pt""").to(_A) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = model(**_A) # verify the logits _SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , _A) _SCREAMING_SNAKE_CASE : Union[str, Any] = torch.tensor([-0.0_803, -0.4_454, -0.2_375]).to(_A) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4))
717
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class _snake_case : """simple docstring""" def __init__( self : int , _A : List[Any] , _A : int , _A : int): """simple docstring""" if dst_width < 0 or dst_height < 0: raise ValueError("""Destination width/height should be > 0""") _SCREAMING_SNAKE_CASE : str = img _SCREAMING_SNAKE_CASE : Optional[Any] = img.shape[1] _SCREAMING_SNAKE_CASE : Tuple = img.shape[0] _SCREAMING_SNAKE_CASE : Any = dst_width _SCREAMING_SNAKE_CASE : Any = dst_height _SCREAMING_SNAKE_CASE : Any = self.src_w / self.dst_w _SCREAMING_SNAKE_CASE : Dict = self.src_h / self.dst_h _SCREAMING_SNAKE_CASE : Optional[Any] = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 2_5_5 ) def _lowerCAmelCase ( self : Tuple): """simple docstring""" for i in range(self.dst_h): for j in range(self.dst_w): _SCREAMING_SNAKE_CASE : Any = self.img[self.get_y(_A)][self.get_x(_A)] def _lowerCAmelCase ( self : int , _A : int): """simple docstring""" return int(self.ratio_x * x) def _lowerCAmelCase ( self : str , _A : int): """simple docstring""" return int(self.ratio_y * y) if __name__ == "__main__": lowerCAmelCase_ , lowerCAmelCase_ = 800, 600 lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1) lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output ) waitKey(0) destroyAllWindows()
635
0
from __future__ import annotations from collections import namedtuple def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> tuple: _SCREAMING_SNAKE_CASE : Any = namedtuple("""result""" , """name value""" ) if (voltage, current, power).count(0 ) != 1: raise ValueError("""Only one argument must be 0""" ) elif power < 0: raise ValueError( """Power cannot be negative in any electrical/electronics system""" ) elif voltage == 0: return result("""voltage""" , power / current ) elif current == 0: return result("""current""" , power / voltage ) elif power == 0: return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) ) else: raise ValueError("""Exactly one argument must be 0""" ) if __name__ == "__main__": import doctest doctest.testmod()
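# The function above is a direct application of the electrical power law
# P = V * I, rearranged as V = P / I or I = P / V depending on which of the
# three quantities was passed in as 0.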
718
"""simple docstring""" import argparse from collections import defaultdict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines() _SCREAMING_SNAKE_CASE : Optional[Any] = F"""class {class_name}(""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{4 * " "}def {test_name}(""" _SCREAMING_SNAKE_CASE : Tuple = F"""{8 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{16 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Tuple = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : Any = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = 0 _SCREAMING_SNAKE_CASE : Dict = [] for line in lines: if line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = True elif in_class and line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = True elif in_class and in_func and (line.startswith(__SCREAMING_SNAKE_CASE ) or line.startswith(__SCREAMING_SNAKE_CASE )): _SCREAMING_SNAKE_CASE : Dict = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _SCREAMING_SNAKE_CASE : int = True if in_class and in_func and in_line: if ")" not in line: continue else: _SCREAMING_SNAKE_CASE : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _SCREAMING_SNAKE_CASE : Optional[int] = False else: new_lines.append(__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , """w""" ) as f: for line in new_lines: f.write(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None )-> Optional[Any]: if fail is not None: with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = {l.strip() for l in f.readlines()} else: _SCREAMING_SNAKE_CASE : str = None with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : str = f.readlines() _SCREAMING_SNAKE_CASE : str = defaultdict(__SCREAMING_SNAKE_CASE ) for line in correct_lines: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) lowerCAmelCase_ = parser.parse_args() main(args.correct_filename, args.fail_filename)
635
0
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: if not all(x.isalpha() for x in string ): raise ValueError("""String must only contain alphabetic characters.""" ) _SCREAMING_SNAKE_CASE : Any = sorted(string.lower() ) return len(__SCREAMING_SNAKE_CASE ) == len(set(__SCREAMING_SNAKE_CASE ) ) if __name__ == "__main__": lowerCAmelCase_ = input('''Enter a string ''').strip() lowerCAmelCase_ = is_isogram(input_str) print(F"{input_str} is {'an' if isogram else 'not an'} isogram.")
719
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowerCAmelCase_ = { '''text_branch''': '''text_model''', '''audio_branch''': '''audio_model.audio_encoder''', '''attn''': '''attention.self''', '''self.proj''': '''output.dense''', '''attention.self_mask''': '''attn_mask''', '''mlp.fc1''': '''intermediate.dense''', '''mlp.fc2''': '''output.dense''', '''norm1''': '''layernorm_before''', '''norm2''': '''layernorm_after''', '''bn0''': '''batch_norm''', } lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''') def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = create_model( """HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : Optional[Any] = R""".*sequential.(\d+).*""" _SCREAMING_SNAKE_CASE : Any = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # replace sequential layers with list _SCREAMING_SNAKE_CASE : List[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) _SCREAMING_SNAKE_CASE : Dict = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.""" ) elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
_SCREAMING_SNAKE_CASE : Dict = 1 if projecton_layer == 0 else 2 _SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" ) if "audio" in key and "qkv" in key: # split qkv into query key and value _SCREAMING_SNAKE_CASE : Dict = value _SCREAMING_SNAKE_CASE : List[Any] = mixed_qkv.size(0 ) // 3 _SCREAMING_SNAKE_CASE : Optional[Any] = mixed_qkv[:qkv_dim] _SCREAMING_SNAKE_CASE : str = mixed_qkv[qkv_dim : qkv_dim * 2] _SCREAMING_SNAKE_CASE : Any = mixed_qkv[qkv_dim * 2 :] _SCREAMING_SNAKE_CASE : Dict = query_layer _SCREAMING_SNAKE_CASE : List[Any] = key_layer _SCREAMING_SNAKE_CASE : Dict = value_layer else: _SCREAMING_SNAKE_CASE : Optional[Any] = value return model_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> List[Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE ) clap_model.eval() _SCREAMING_SNAKE_CASE : Dict = clap_model.state_dict() _SCREAMING_SNAKE_CASE : Tuple = rename_state_dict(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = ClapConfig() _SCREAMING_SNAKE_CASE : Tuple = enable_fusion _SCREAMING_SNAKE_CASE : Dict = ClapModel(__SCREAMING_SNAKE_CASE ) # ignore the spectrogram embedding layer model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''') lowerCAmelCase_ = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
635
0
"""simple docstring""" from argparse import ArgumentParser from accelerate.commands.config import get_config_parser from accelerate.commands.env import env_command_parser from accelerate.commands.launch import launch_command_parser from accelerate.commands.test import test_command_parser from accelerate.commands.tpu import tpu_command_parser def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : List[str] = ArgumentParser("""Accelerate CLI tool""" , usage="""accelerate <command> [<args>]""" , allow_abbrev=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = parser.add_subparsers(help="""accelerate command helpers""" ) # Register commands get_config_parser(subparsers=__SCREAMING_SNAKE_CASE ) env_command_parser(subparsers=__SCREAMING_SNAKE_CASE ) launch_command_parser(subparsers=__SCREAMING_SNAKE_CASE ) tpu_command_parser(subparsers=__SCREAMING_SNAKE_CASE ) test_command_parser(subparsers=__SCREAMING_SNAKE_CASE ) # Let's go _SCREAMING_SNAKE_CASE : Optional[Any] = parser.parse_args() if not hasattr(__SCREAMING_SNAKE_CASE , """func""" ): parser.print_help() exit(1 ) # Run args.func(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
720
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , ) assert hasattr(self , """env""") def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1): """simple docstring""" return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]): """simple docstring""" TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""") def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.create_estimator() # run training estimator.fit() # result dataframe _SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""]) _SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _SCREAMING_SNAKE_CASE : int = ( Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy) assert all(t <= self.results["""eval_loss"""] for t in eval_loss) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available lowerCAmelCase_ = { '''configuration_maskformer''': ['''MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MaskFormerConfig'''], '''configuration_maskformer_swin''': ['''MaskFormerSwinConfig'''], } try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''MaskFormerFeatureExtractor'''] lowerCAmelCase_ = ['''MaskFormerImageProcessor'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''MaskFormerForInstanceSegmentation''', '''MaskFormerModel''', '''MaskFormerPreTrainedModel''', ] lowerCAmelCase_ = [ '''MaskFormerSwinBackbone''', '''MaskFormerSwinModel''', '''MaskFormerSwinPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig from .configuration_maskformer_swin import MaskFormerSwinConfig try: if not is_vision_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .feature_extraction_maskformer import MaskFormerFeatureExtractor from .image_processing_maskformer import MaskFormerImageProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_maskformer import ( MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, MaskFormerForInstanceSegmentation, MaskFormerModel, MaskFormerPreTrainedModel, ) from .modeling_maskformer_swin import ( MaskFormerSwinBackbone, MaskFormerSwinModel, MaskFormerSwinPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: return max(metric_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for gt in ground_truths ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Dict = [] if args.gold_data_mode == "qa": _SCREAMING_SNAKE_CASE : int = pd.read_csv(__SCREAMING_SNAKE_CASE , sep="""\t""" , header=__SCREAMING_SNAKE_CASE ) for answer_list in data[1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = ast.literal_eval(__SCREAMING_SNAKE_CASE ) answers.append(__SCREAMING_SNAKE_CASE ) else: _SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[int] = [[reference] for reference in references] _SCREAMING_SNAKE_CASE : Optional[int] = 0 for prediction, ground_truths in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): total += 1 em += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) fa += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = 1_00.0 * em / total _SCREAMING_SNAKE_CASE : Optional[Any] = 1_00.0 * fa / total logger.info(F"""F1: {fa:.2f}""" ) logger.info(F"""EM: {em:.2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = args.k _SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[Any] = 0 for hypo, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = set(hypo.split("""\t""" )[:k] ) _SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k _SCREAMING_SNAKE_CASE : int = 1_00.0 * em / total logger.info(F"""Precision@{k}: {em: .2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: def strip_title(__SCREAMING_SNAKE_CASE ): if title.startswith("""\"""" ): _SCREAMING_SNAKE_CASE : Optional[int] = title[1:] if title.endswith("""\"""" ): _SCREAMING_SNAKE_CASE : str = title[:-1] return title _SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , 
return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device ) _SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = question_enc_outputs[0] _SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever( __SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) _SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for docs in all_docs: _SCREAMING_SNAKE_CASE : str = [strip_title(__SCREAMING_SNAKE_CASE ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(__SCREAMING_SNAKE_CASE ) ) return provenance_strings def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.attention_mask.to(args.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) _SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) if args.print_predictions: for q, a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.info("""Q: {} - A: {}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) return answers def lowerCamelCase_()-> List[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__SCREAMING_SNAKE_CASE , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=__SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=__SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=__SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__SCREAMING_SNAKE_CASE , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""k for the 
precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=__SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=__SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=__SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=__SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) _SCREAMING_SNAKE_CASE : Dict = parser.parse_args() _SCREAMING_SNAKE_CASE : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : Union[str, Any] = {} if args.model_type is None: _SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration _SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs if args.index_name is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name if args.index_path is not None: _SCREAMING_SNAKE_CASE : Any = args.index_path else: _SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration _SCREAMING_SNAKE_CASE : int = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k _SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): 
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.retriever.init_retrieval() else: _SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: _SCREAMING_SNAKE_CASE : str = [] for line in tqdm(__SCREAMING_SNAKE_CASE ): questions.append(line.strip() ) if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size: _SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" ) preds_file.flush() _SCREAMING_SNAKE_CASE : Any = [] if len(__SCREAMING_SNAKE_CASE ) > 0: _SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) ) preds_file.flush() score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": lowerCAmelCase_ = get_args() main(args)
"""simple docstring""" import collections import inspect import unittest from transformers import FocalNetConfig from transformers.testing_utils import require_torch, require_vision, slow, torch_device from transformers.utils import cached_property, is_torch_available, is_vision_available from ...test_backbone_common import BackboneTesterMixin from ...test_configuration_common import ConfigTester from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor from ...test_pipeline_mixin import PipelineTesterMixin if is_torch_available(): import torch from torch import nn from transformers import ( FocalNetBackbone, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetModel, ) from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST if is_vision_available(): from PIL import Image from transformers import AutoImageProcessor class _snake_case : """simple docstring""" def __init__( self : Any , _A : int , _A : Optional[Any]=1_3 , _A : Dict=3_2 , _A : int=2 , _A : Optional[Any]=3 , _A : Tuple=1_6 , _A : Tuple=[3_2, 6_4, 1_2_8] , _A : Optional[int]=[1, 2, 1] , _A : List[str]=[2, 2, 4] , _A : Optional[Any]=2 , _A : int=2.0 , _A : Optional[int]=True , _A : Optional[Any]=0.0 , _A : Tuple=0.0 , _A : Union[str, Any]=0.1 , _A : Optional[Any]="gelu" , _A : List[str]=False , _A : int=True , _A : List[str]=0.02 , _A : Tuple=1e-5 , _A : List[str]=True , _A : Optional[int]=None , _A : Dict=True , _A : Any=1_0 , _A : int=8 , _A : Tuple=["stage1", "stage2"] , _A : str=[1, 2] , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = parent _SCREAMING_SNAKE_CASE : Any = batch_size _SCREAMING_SNAKE_CASE : Tuple = image_size _SCREAMING_SNAKE_CASE : int = patch_size _SCREAMING_SNAKE_CASE : List[Any] = num_channels _SCREAMING_SNAKE_CASE : List[str] = embed_dim _SCREAMING_SNAKE_CASE : Dict = hidden_sizes _SCREAMING_SNAKE_CASE : Dict = depths _SCREAMING_SNAKE_CASE : List[Any] = num_heads _SCREAMING_SNAKE_CASE : List[str] = window_size _SCREAMING_SNAKE_CASE : Any = mlp_ratio _SCREAMING_SNAKE_CASE : str = qkv_bias _SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob _SCREAMING_SNAKE_CASE : int = attention_probs_dropout_prob _SCREAMING_SNAKE_CASE : Union[str, Any] = drop_path_rate _SCREAMING_SNAKE_CASE : List[str] = hidden_act _SCREAMING_SNAKE_CASE : Optional[Any] = use_absolute_embeddings _SCREAMING_SNAKE_CASE : List[Any] = patch_norm _SCREAMING_SNAKE_CASE : Tuple = layer_norm_eps _SCREAMING_SNAKE_CASE : str = initializer_range _SCREAMING_SNAKE_CASE : Optional[Any] = is_training _SCREAMING_SNAKE_CASE : str = scope _SCREAMING_SNAKE_CASE : Optional[int] = use_labels _SCREAMING_SNAKE_CASE : Optional[int] = type_sequence_label_size _SCREAMING_SNAKE_CASE : int = encoder_stride _SCREAMING_SNAKE_CASE : List[str] = out_features _SCREAMING_SNAKE_CASE : List[str] = out_indices def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size]) _SCREAMING_SNAKE_CASE : Dict = None if self.use_labels: _SCREAMING_SNAKE_CASE : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size) _SCREAMING_SNAKE_CASE : Tuple = self.get_config() return config, pixel_values, labels def _lowerCAmelCase ( self : str): """simple docstring""" return FocalNetConfig( image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , 
depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , ) def _lowerCAmelCase ( self : List[Any] , _A : List[Any] , _A : Tuple , _A : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = FocalNetModel(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Dict = model(_A) _SCREAMING_SNAKE_CASE : Dict = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1)) _SCREAMING_SNAKE_CASE : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths) - 1)) self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim)) def _lowerCAmelCase ( self : str , _A : Optional[Any] , _A : Any , _A : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = FocalNetBackbone(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Tuple = model(_A) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , len(config.out_features)) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size, 8, 8]) # verify channels self.parent.assertEqual(len(model.channels) , len(config.out_features)) self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1]) # verify backbone works with out_features=None _SCREAMING_SNAKE_CASE : Any = None _SCREAMING_SNAKE_CASE : str = FocalNetBackbone(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Tuple = model(_A) # verify feature maps self.parent.assertEqual(len(result.feature_maps) , 1) self.parent.assertListEqual(list(result.feature_maps[0].shape) , [self.batch_size, self.image_size * 2, 4, 4]) # verify channels self.parent.assertEqual(len(model.channels) , 1) self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]]) def _lowerCAmelCase ( self : Optional[int] , _A : Any , _A : Union[str, Any] , _A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = FocalNetForMaskedImageModeling(config=_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Tuple = model(_A) self.parent.assertEqual( result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size)) # test greyscale images _SCREAMING_SNAKE_CASE : Dict = 1 _SCREAMING_SNAKE_CASE : Optional[Any] = FocalNetForMaskedImageModeling(_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) _SCREAMING_SNAKE_CASE : Optional[Any] = model(_A) self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size)) def _lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = self.type_sequence_label_size _SCREAMING_SNAKE_CASE : Optional[Any] = FocalNetForImageClassification(_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : str = model(_A , labels=_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) # test greyscale images _SCREAMING_SNAKE_CASE : List[str] = 1 
_SCREAMING_SNAKE_CASE : List[str] = FocalNetForImageClassification(_A) model.to(_A) model.eval() _SCREAMING_SNAKE_CASE : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size]) _SCREAMING_SNAKE_CASE : Dict = model(_A) self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size)) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.prepare_config_and_inputs() _SCREAMING_SNAKE_CASE : Optional[Any] = config_and_inputs _SCREAMING_SNAKE_CASE : List[str] = {"""pixel_values""": pixel_values} return config, inputs_dict @require_torch class _snake_case ( __snake_case , __snake_case , unittest.TestCase ): """simple docstring""" a = ( ( FocalNetModel, FocalNetForImageClassification, FocalNetForMaskedImageModeling, FocalNetBackbone, ) if is_torch_available() else () ) a = ( {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification} if is_torch_available() else {} ) a = False a = False a = False a = False a = False def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = FocalNetModelTester(self) _SCREAMING_SNAKE_CASE : str = ConfigTester(self , config_class=_A , embed_dim=3_7 , has_text_modality=_A) def _lowerCAmelCase ( self : Dict): """simple docstring""" self.create_and_test_config_common_properties() self.config_tester.create_and_test_config_to_json_string() self.config_tester.create_and_test_config_to_json_file() self.config_tester.create_and_test_config_from_and_save_pretrained() self.config_tester.create_and_test_config_with_num_labels() self.config_tester.check_config_can_be_init_without_params() self.config_tester.check_config_arguments_init() def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" return def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*_A) def _lowerCAmelCase ( self : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_backbone(*_A) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_image_modeling(*_A) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_image_classification(*_A) @unittest.skip(reason="""FocalNet does not use inputs_embeds""") def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" pass @unittest.skip(reason="""FocalNet does not use feedforward chunking""") def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" pass def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes[:-1]: _SCREAMING_SNAKE_CASE : List[Any] = model_class(_A) self.assertIsInstance(model.get_input_embeddings() , (nn.Module)) _SCREAMING_SNAKE_CASE : List[Any] = model.get_output_embeddings() self.assertTrue(x is None or isinstance(_A , nn.Linear)) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() for model_class in 
self.all_model_classes[:-1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = model_class(_A) _SCREAMING_SNAKE_CASE : Dict = inspect.signature(model.forward) # signature.parameters is an OrderedDict => so arg_names order is deterministic _SCREAMING_SNAKE_CASE : str = [*signature.parameters.keys()] _SCREAMING_SNAKE_CASE : int = ["""pixel_values"""] self.assertListEqual(arg_names[:1] , _A) def _lowerCAmelCase ( self : int , _A : Any , _A : Any , _A : Dict , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = model_class(_A) model.to(_A) model.eval() with torch.no_grad(): _SCREAMING_SNAKE_CASE : str = model(**self._prepare_for_class(_A , _A)) _SCREAMING_SNAKE_CASE : List[str] = outputs.hidden_states _SCREAMING_SNAKE_CASE : int = getattr( self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths) + 1) self.assertEqual(len(_A) , _A) # FocalNet has a different seq_length _SCREAMING_SNAKE_CASE : List[str] = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0]) self.assertListEqual( list(hidden_states[0].shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) _SCREAMING_SNAKE_CASE : str = outputs.reshaped_hidden_states self.assertEqual(len(_A) , _A) _SCREAMING_SNAKE_CASE : str = reshaped_hidden_states[0].shape _SCREAMING_SNAKE_CASE : Optional[Any] = ( reshaped_hidden_states[0].view(_A , _A , height * width).permute(0 , 2 , 1) ) self.assertListEqual( list(reshaped_hidden_states.shape[-2:]) , [num_patches, self.model_tester.embed_dim] , ) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : List[Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) for model_class in self.all_model_classes[:-1]: _SCREAMING_SNAKE_CASE : List[str] = True self.check_hidden_states_output(_A , _A , _A , _A) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _SCREAMING_SNAKE_CASE : Optional[Any] = True self.check_hidden_states_output(_A , _A , _A , _A) def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : Union[str, Any] = 3 _SCREAMING_SNAKE_CASE : Union[str, Any] = ( self.model_tester.image_size if isinstance(self.model_tester.image_size , collections.abc.Iterable) else (self.model_tester.image_size, self.model_tester.image_size) ) _SCREAMING_SNAKE_CASE : str = ( config.patch_size if isinstance(config.patch_size , collections.abc.Iterable) else (config.patch_size, config.patch_size) ) _SCREAMING_SNAKE_CASE : Optional[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0]) _SCREAMING_SNAKE_CASE : Dict = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1]) for model_class in self.all_model_classes[:-1]: _SCREAMING_SNAKE_CASE : List[Any] = True self.check_hidden_states_output(_A , _A , _A , (padded_height, padded_width)) # check that output_hidden_states also work using config del inputs_dict["output_hidden_states"] _SCREAMING_SNAKE_CASE : Tuple = True self.check_hidden_states_output(_A , _A , _A , (padded_height, padded_width)) @slow def _lowerCAmelCase ( self : Dict): 
"""simple docstring""" for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = FocalNetModel.from_pretrained(_A) self.assertIsNotNone(_A) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self.model_tester.prepare_config_and_inputs_for_common() _SCREAMING_SNAKE_CASE : str = _config_zero_init(_A) for model_class in self.all_model_classes: _SCREAMING_SNAKE_CASE : int = model_class(config=_A) for name, param in model.named_parameters(): if "embeddings" not in name and param.requires_grad: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , ) @require_vision @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" @cached_property def _lowerCAmelCase ( self : Any): """simple docstring""" return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""") if is_vision_available() else None @slow def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""").to(_A) _SCREAMING_SNAKE_CASE : List[Any] = self.default_image_processor _SCREAMING_SNAKE_CASE : int = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""") _SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=_A , return_tensors="""pt""").to(_A) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[Any] = model(**_A) # verify the logits _SCREAMING_SNAKE_CASE : int = torch.Size((1, 1_0_0_0)) self.assertEqual(outputs.logits.shape , _A) _SCREAMING_SNAKE_CASE : Dict = torch.tensor([0.2_166, -0.4_368, 0.2_191]).to(_A) self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4)) self.assertTrue(outputs.logits.argmax(dim=-1).item() , 2_8_1) @require_torch class _snake_case ( __snake_case , unittest.TestCase ): """simple docstring""" a = (FocalNetBackbone,) if is_torch_available() else () a = FocalNetConfig a = False def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : int = FocalNetModelTester(self)
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowerCamelCase_(__SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1_026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , )-> Union[str, Any]: set_seed(3 ) # generate train_data and objective_set _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? _SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model _SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE ) print("""computing perplexity on objective set""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item() print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE ) # collect igf pairs and save to file demo.jbl collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , )-> Optional[int]: set_seed(42 ) # Load pre-trained model _SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model _SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE ) # Train secondary learner _SCREAMING_SNAKE_CASE : Any = train_secondary_learner( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1 _SCREAMING_SNAKE_CASE : List[Any] = 0 
_SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) model.train() if secondary_learner is not None: secondary_learner.to(__SCREAMING_SNAKE_CASE ) secondary_learner.eval() _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : Optional[int] = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = [] _SCREAMING_SNAKE_CASE : int = [] # Compute the performance of the transformer model at the beginning _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) for epoch in range(int(__SCREAMING_SNAKE_CASE ) ): for step, example in enumerate(__SCREAMING_SNAKE_CASE ): torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 ) _SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() _SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = True if secondary_learner is not None: _SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward( torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item() observed_qs.append(float(__SCREAMING_SNAKE_CASE ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: _SCREAMING_SNAKE_CASE : Dict = -1 if predicted_q < threshold: _SCREAMING_SNAKE_CASE : List[str] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. 
if examples == batch_size: torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=( """decay the selectivity of our secondary learner filter from""" """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner _SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner _SCREAMING_SNAKE_CASE : int = training_secondary_learner( __SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model _SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets( context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
"""simple docstring""" lowerCAmelCase_ = 65521 def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : int = 1 _SCREAMING_SNAKE_CASE : Optional[int] = 0 for plain_chr in plain_text: _SCREAMING_SNAKE_CASE : Union[str, Any] = (a + ord(__SCREAMING_SNAKE_CASE )) % MOD_ADLER _SCREAMING_SNAKE_CASE : List[Any] = (b + a) % MOD_ADLER return (b << 16) | a
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( __snake_case ): """simple docstring""" a = ["image_processor", "tokenizer"] a = "ChineseCLIPImageProcessor" a = ("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _A , ) _SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""") _SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""") if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""") super().__init__(_A , _A) _SCREAMING_SNAKE_CASE : Dict = self.image_processor def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int): """simple docstring""" if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""") if text is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A) if images is not None: _SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A) if text is not None and images is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A) , tensor_type=_A) def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any): """simple docstring""" return self.tokenizer.batch_decode(*_A , **_A) def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any): """simple docstring""" return self.tokenizer.decode(*_A , **_A) @property def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , ) return self.image_processor_class
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowerCAmelCase_ : Any = { '''text_branch''': '''text_model''', '''audio_branch''': '''audio_model.audio_encoder''', '''attn''': '''attention.self''', '''self.proj''': '''output.dense''', '''attention.self_mask''': '''attn_mask''', '''mlp.fc1''': '''intermediate.dense''', '''mlp.fc2''': '''output.dense''', '''norm1''': '''layernorm_before''', '''norm2''': '''layernorm_after''', '''bn0''': '''batch_norm''', } lowerCAmelCase_ : Tuple = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''') def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str: _SCREAMING_SNAKE_CASE : List[Any] = create_model( """HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : Optional[Any] = R""".*sequential.(\d+).*""" _SCREAMING_SNAKE_CASE : Any = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # replace sequential layers with list _SCREAMING_SNAKE_CASE : List[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) _SCREAMING_SNAKE_CASE : Dict = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.""" ) elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
_SCREAMING_SNAKE_CASE : Dict = 1 if projecton_layer == 0 else 2 _SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" ) if "audio" and "qkv" in key: # split qkv into query key and value _SCREAMING_SNAKE_CASE : Dict = value _SCREAMING_SNAKE_CASE : List[Any] = mixed_qkv.size(0 ) // 3 _SCREAMING_SNAKE_CASE : Optional[Any] = mixed_qkv[:qkv_dim] _SCREAMING_SNAKE_CASE : str = mixed_qkv[qkv_dim : qkv_dim * 2] _SCREAMING_SNAKE_CASE : Any = mixed_qkv[qkv_dim * 2 :] _SCREAMING_SNAKE_CASE : Dict = query_layer _SCREAMING_SNAKE_CASE : List[Any] = key_layer _SCREAMING_SNAKE_CASE : Dict = value_layer else: _SCREAMING_SNAKE_CASE : Optional[Any] = value return model_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> List[Any]: _SCREAMING_SNAKE_CASE : int = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE ) clap_model.eval() _SCREAMING_SNAKE_CASE : Dict = clap_model.state_dict() _SCREAMING_SNAKE_CASE : Tuple = rename_state_dict(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = ClapConfig() _SCREAMING_SNAKE_CASE : Tuple = enable_fusion _SCREAMING_SNAKE_CASE : Dict = ClapModel(__SCREAMING_SNAKE_CASE ) # ignore the spectrogram embedding layer model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ : Union[str, Any] = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''') lowerCAmelCase_ : str = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = ['''model.decoder.embed_positions.weights'''] def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: if "emb" in name: _SCREAMING_SNAKE_CASE : List[Any] = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: _SCREAMING_SNAKE_CASE : List[str] = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: _SCREAMING_SNAKE_CASE : int = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: _SCREAMING_SNAKE_CASE : Tuple = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: _SCREAMING_SNAKE_CASE : str = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple[Dict, Dict]: _SCREAMING_SNAKE_CASE : str = list(state_dict.keys() ) _SCREAMING_SNAKE_CASE : Tuple = {} for key in keys: _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = rename_keys(__SCREAMING_SNAKE_CASE ) if "in_proj_weight" in key: # split fused qkv proj _SCREAMING_SNAKE_CASE : str = val[:hidden_size, :] _SCREAMING_SNAKE_CASE : Any = val[hidden_size : 2 * hidden_size, :] _SCREAMING_SNAKE_CASE : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: _SCREAMING_SNAKE_CASE : int = val else: _SCREAMING_SNAKE_CASE : Dict = val return state_dict, enc_dec_proj_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> MusicgenDecoderConfig: if checkpoint == "small": # default config values _SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 _SCREAMING_SNAKE_CASE : str = 24 _SCREAMING_SNAKE_CASE : Any = 16 elif checkpoint == "medium": _SCREAMING_SNAKE_CASE : Dict = 1_536 _SCREAMING_SNAKE_CASE : Union[str, Any] = 48 _SCREAMING_SNAKE_CASE : Optional[Any] = 24 elif checkpoint == "large": _SCREAMING_SNAKE_CASE : List[Any] = 2_048 _SCREAMING_SNAKE_CASE : Optional[int] = 48 _SCREAMING_SNAKE_CASE : str = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = MusicgenDecoderConfig( hidden_size=__SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=__SCREAMING_SNAKE_CASE , 
num_attention_heads=__SCREAMING_SNAKE_CASE , ) return config @torch.no_grad() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="cpu" )-> str: _SCREAMING_SNAKE_CASE : str = MusicGen.get_pretrained(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = decoder_config_from_checkpoint(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = fairseq_model.lm.state_dict() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = rename_state_dict( __SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size ) _SCREAMING_SNAKE_CASE : Tuple = TaEncoderModel.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[Any] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) _SCREAMING_SNAKE_CASE : str = MusicgenForCausalLM(__SCREAMING_SNAKE_CASE ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model _SCREAMING_SNAKE_CASE : Dict = MusicgenForConditionalGeneration(text_encoder=__SCREAMING_SNAKE_CASE , audio_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__SCREAMING_SNAKE_CASE ) # check we can do a forward pass _SCREAMING_SNAKE_CASE : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) _SCREAMING_SNAKE_CASE : Dict = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE ).logits if logits.shape != (8, 1, 2_048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) _SCREAMING_SNAKE_CASE : Optional[int] = MusicgenProcessor(feature_extractor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE ) # set the appropriate bos/pad token ids _SCREAMING_SNAKE_CASE : Optional[Any] = 2_048 _SCREAMING_SNAKE_CASE : List[Any] = 2_048 # set other default generation config params _SCREAMING_SNAKE_CASE : Any = int(30 * audio_encoder.config.frame_rate ) _SCREAMING_SNAKE_CASE : Tuple = True _SCREAMING_SNAKE_CASE : int = 3.0 if pytorch_dump_folder is not None: Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(__SCREAMING_SNAKE_CASE ) processor.push_to_hub(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint''', 
    default="small",
    type=str,
    help="Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.",
)
parser.add_argument(
    "--pytorch_dump_folder",
    required=True,
    default=None,
    type=str,
    help="Path to the output PyTorch model directory.",
)
parser.add_argument(
    "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
parser.add_argument(
    "--device", default="cpu", type=str, help="Torch device to run the conversion, either cpu or cuda."
)

args = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub, args.device)
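Once the script above has produced a converted checkpoint, it can be exercised with the standard transformers generation API. This is a minimal, untested usage sketch; the local path "./musicgen-small" is a placeholder for whatever --pytorch_dump_folder was used.

# Hypothetical smoke test for a converted checkpoint; the path is an assumption.
from transformers import MusicgenForConditionalGeneration, MusicgenProcessor

processor = MusicgenProcessor.from_pretrained("./musicgen-small")
model = MusicgenForConditionalGeneration.from_pretrained("./musicgen-small")

inputs = processor(text=["80s pop track with bassy drums"], padding=True, return_tensors="pt")
# sample with classifier-free guidance, matching the generation defaults set by the script
audio_values = model.generate(**inputs, do_sample=True, guidance_scale=3.0, max_new_tokens=256)
print(audio_values.shape)  # (batch, channels, samples)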
import inspect
import os
import unittest

import torch

import accelerate
from accelerate import debug_launcher
from accelerate.test_utils import (
    execute_subprocess_async,
    require_cpu,
    require_huggingface_suite,
    require_multi_gpu,
    require_single_gpu,
)
from accelerate.utils import patch_environment


@require_huggingface_suite
class MetricTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps", "test_metrics.py"]
        )

        from accelerate.test_utils.scripts.external_deps import test_metrics  # noqa: F401

        self.test_metrics = test_metrics

    @require_cpu
    def test_metric_cpu_noop(self):
        debug_launcher(self.test_metrics.main, num_processes=1)

    @require_cpu
    def test_metric_cpu_multi(self):
        debug_launcher(self.test_metrics.main)

    @require_single_gpu
    def test_metric_gpu(self):
        self.test_metrics.main()

    @require_multi_gpu
    def test_metric_gpu_multi(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''', # See all SEW models at https://huggingface.co/models?filter=sew } class _snake_case ( __snake_case ): """simple docstring""" a = "sew" def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ): """simple docstring""" super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A) _SCREAMING_SNAKE_CASE : str = hidden_size _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation _SCREAMING_SNAKE_CASE : Dict = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : str = conv_bias _SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings _SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups _SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim) _SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers _SCREAMING_SNAKE_CASE : List[str] = intermediate_size _SCREAMING_SNAKE_CASE : str = squeeze_factor _SCREAMING_SNAKE_CASE : Dict = hidden_act _SCREAMING_SNAKE_CASE : str = num_attention_heads _SCREAMING_SNAKE_CASE : Dict = hidden_dropout _SCREAMING_SNAKE_CASE : Tuple = attention_dropout _SCREAMING_SNAKE_CASE : int = activation_dropout _SCREAMING_SNAKE_CASE : Any = feat_proj_dropout _SCREAMING_SNAKE_CASE : str = final_dropout _SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop _SCREAMING_SNAKE_CASE : Any = layer_norm_eps _SCREAMING_SNAKE_CASE : int = initializer_range _SCREAMING_SNAKE_CASE : List[Any] = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment _SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob _SCREAMING_SNAKE_CASE : List[str] = mask_time_length _SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks 
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob _SCREAMING_SNAKE_CASE : int = mask_feature_length _SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks # ctc loss _SCREAMING_SNAKE_CASE : int = ctc_loss_reduction _SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity # sequence classification _SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum _SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size @property def _lowerCAmelCase ( self : Any): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1)
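The inputs_to_logits_ratio property above is simply the product of the convolutional strides, i.e. the total downsampling factor of the feature encoder. A small sketch of what it yields for the default configuration:

from functools import reduce
from operator import mul

from transformers import SEWConfig

config = SEWConfig()  # default conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert config.inputs_to_logits_ratio == reduce(mul, config.conv_stride, 1)
print(config.inputs_to_logits_ratio)  # 320: one logit frame per 320 input samples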
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> list: _SCREAMING_SNAKE_CASE : Union[str, Any] = len(__SCREAMING_SNAKE_CASE ) for _ in range(__SCREAMING_SNAKE_CASE ): for i in range(_ % 2 , arr_size - 1 , 2 ): if arr[i + 1] < arr[i]: _SCREAMING_SNAKE_CASE : Tuple = arr[i + 1], arr[i] return arr if __name__ == "__main__": lowerCAmelCase_ = list(range(10, 0, -1)) print(F"Original: {arr}. Sorted: {odd_even_transposition(arr)}")
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''', '''UniSpeechForCTC''', '''UniSpeechForPreTraining''', '''UniSpeechForSequenceClassification''', '''UniSpeechModel''', '''UniSpeechPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" from ...utils import ( OptionalDependencyNotAvailable, is_torch_available, is_transformers_available, is_transformers_version, ) try: if not (is_transformers_available() and is_torch_available()): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline else: from .camera import create_pan_cameras from .pipeline_shap_e import ShapEPipeline from .pipeline_shap_e_img2img import ShapEImgaImgPipeline from .renderer import ( BoundingBoxVolume, ImportanceRaySampler, MLPNeRFModelOutput, MLPNeRSTFModel, ShapEParamsProjModel, ShapERenderer, StratifiedRaySampler, VoidNeRFModel, )
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : int = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : List[Any] = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str: if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = parquet_path elif 
issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] _SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=("train",) )-> Union[str, Any]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for split in splits: _SCREAMING_SNAKE_CASE : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: _SCREAMING_SNAKE_CASE : Optional[int] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[str] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : int = ParquetDatasetReader({"""train""": parquet_path} , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: if split: _SCREAMING_SNAKE_CASE : Union[str, Any] = {split: parquet_path} else: _SCREAMING_SNAKE_CASE : Optional[int] = """train""" _SCREAMING_SNAKE_CASE : Any = {"""train""": parquet_path, """test""": parquet_path} _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , 
cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / """foo.parquet""" ) _SCREAMING_SNAKE_CASE : str = pf.read() assert dataset.data.table == output_table def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Dict = str(shared_datadir / """test_image_rgb.jpg""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = {"""image""": [image_path]} _SCREAMING_SNAKE_CASE : Optional[Any] = Features({"""image""": Image()} ) _SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__SCREAMING_SNAKE_CASE ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: assert get_writer_batch_size(__SCREAMING_SNAKE_CASE ) == expected
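The readers and writers exercised above are the internals behind the public Dataset.to_parquet / Dataset.from_parquet helpers. A minimal round-trip sketch using that public surface (the file name is arbitrary):

from datasets import Dataset

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [0.5, 1.5]})
ds.to_parquet("roundtrip.parquet")          # returns the number of bytes written
reloaded = Dataset.from_parquet("roundtrip.parquet")
assert reloaded.column_names == ds.column_names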
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE = 1_000 )-> int: _SCREAMING_SNAKE_CASE : List[str] = 1, 1 _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for i in range(1 , n + 1 ): _SCREAMING_SNAKE_CASE : int = prev_numerator + 2 * prev_denominator _SCREAMING_SNAKE_CASE : Optional[int] = prev_numerator + prev_denominator if len(str(__SCREAMING_SNAKE_CASE ) ) > len(str(__SCREAMING_SNAKE_CASE ) ): result.append(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = numerator _SCREAMING_SNAKE_CASE : Union[str, Any] = denominator return len(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": print(F"{solution() = }")
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""only integers accepted as input""" ) else: _SCREAMING_SNAKE_CASE : List[Any] = str(abs(__SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : List[str] = [list(__SCREAMING_SNAKE_CASE ) for char in range(len(__SCREAMING_SNAKE_CASE ) )] for index in range(len(__SCREAMING_SNAKE_CASE ) ): num_transpositions[index].pop(__SCREAMING_SNAKE_CASE ) return max( int("""""".join(list(__SCREAMING_SNAKE_CASE ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('''doctest''').testmod()
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase_ = { '''configuration_graphormer''': ['''GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GraphormerConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''GraphormerForGraphClassification''', '''GraphormerModel''', '''GraphormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_graphormer import ( GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST, GraphormerForGraphClassification, GraphormerModel, GraphormerPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Dict = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : str = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : List[Any] = -1 _SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0]) _SCREAMING_SNAKE_CASE : List[Any] = TextIteratorStreamer(_A) _SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[Any] = Thread(target=model.generate , kwargs=_A) thread.start() _SCREAMING_SNAKE_CASE : Any = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(_A , _A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : str = greedy_ids[:, input_ids.shape[1] :] _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(new_greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A , skip_prompt=_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : Optional[int] = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""distilgpt2""") _SCREAMING_SNAKE_CASE : Optional[Any] = 
AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(_A) _SCREAMING_SNAKE_CASE : int = -1 _SCREAMING_SNAKE_CASE : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Optional[int] = TextStreamer(_A , skip_special_tokens=_A) model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _SCREAMING_SNAKE_CASE : Optional[Any] = cs.out[:-1] # Remove the final "\n" _SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""pt""") self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1)) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Tuple = -1 _SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(_A , timeout=0.001) _SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[str] = Thread(target=model.generate , kwargs=_A) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_A): _SCREAMING_SNAKE_CASE : str = """""" for new_text in streamer: streamer_text += new_text
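Outside of tests, the streamers are used the same way: TextStreamer prints tokens to stdout as they are produced, while TextIteratorStreamer hands them to a consuming thread. A minimal interactive sketch, reusing the tiny test checkpoint from above:

from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer(["The answer is"], return_tensors="pt")
streamer = TextStreamer(tokenizer, skip_prompt=True)
# tokens are printed to stdout one by one as generate() produces them
model.generate(**inputs, max_new_tokens=10, streamer=streamer)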
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _SCREAMING_SNAKE_CASE : Optional[int] = TapasConfig.from_json_file(__SCREAMING_SNAKE_CASE ) # set absolute/relative position embeddings parameter _SCREAMING_SNAKE_CASE : Dict = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _SCREAMING_SNAKE_CASE : str = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WTQ": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : Optional[int] = 4 _SCREAMING_SNAKE_CASE : Any = True # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 0.66_46_94 _SCREAMING_SNAKE_CASE : str = 0.20_79_51 _SCREAMING_SNAKE_CASE : str = 0.12_11_94 _SCREAMING_SNAKE_CASE : List[Any] = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = 0.0_35_25_13 _SCREAMING_SNAKE_CASE : Optional[Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : int = 4 _SCREAMING_SNAKE_CASE : Tuple = False # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 36.45_19 _SCREAMING_SNAKE_CASE : Union[str, Any] = 0.90_34_21 _SCREAMING_SNAKE_CASE : Optional[Any] = 222.088 _SCREAMING_SNAKE_CASE : Any = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Optional[int] = True _SCREAMING_SNAKE_CASE : Dict = 0.76_31_41 _SCREAMING_SNAKE_CASE : Union[str, Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "TABFACT": _SCREAMING_SNAKE_CASE : int = TapasForSequenceClassification(config=__SCREAMING_SNAKE_CASE ) elif task == "MLM": _SCREAMING_SNAKE_CASE : int = TapasForMaskedLM(config=__SCREAMING_SNAKE_CASE ) elif task == "INTERMEDIATE_PRETRAINING": _SCREAMING_SNAKE_CASE : int = TapasModel(config=__SCREAMING_SNAKE_CASE ) else: raise ValueError(F"""Task {task} not supported.""" ) print(F"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save pytorch-model (weights and configuration) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Save tokenizer files print(F"""Save tokenizer files to {pytorch_dump_path}""" ) _SCREAMING_SNAKE_CASE : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 ) tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. 
Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. Defaults to True.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
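After conversion, the dump directory can be loaded back with the matching task head. A brief sketch, assuming the script above was run with a question-answering task; the local path "./tapas-wtq" is a placeholder:

import pandas as pd
from transformers import TapasForQuestionAnswering, TapasTokenizer

model = TapasForQuestionAnswering.from_pretrained("./tapas-wtq")
tokenizer = TapasTokenizer.from_pretrained("./tapas-wtq")

table = pd.DataFrame({"City": ["Paris", "London"], "Population": ["2.1M", "8.9M"]})
inputs = tokenizer(table=table, queries=["Which city is larger?"], return_tensors="pt")
outputs = model(**inputs)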
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class _snake_case ( __snake_case ): """simple docstring""" a = "facebook/bart-large-mnli" a = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) a = "text_classifier" a = AutoTokenizer a = AutoModelForSequenceClassification a = ["text", ["text"]] a = ["text"] def _lowerCAmelCase ( self : int): """simple docstring""" super().setup() _SCREAMING_SNAKE_CASE : Any = self.model.config _SCREAMING_SNAKE_CASE : Any = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("""entail"""): _SCREAMING_SNAKE_CASE : List[Any] = int(_A) if self.entailment_id == -1: raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""") def _lowerCAmelCase ( self : Optional[Any] , _A : Tuple , _A : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = labels return self.pre_processor( [text] * len(_A) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , ) def _lowerCAmelCase ( self : Tuple , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = outputs.logits _SCREAMING_SNAKE_CASE : List[Any] = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
"""simple docstring""" import sacrebleu as scb from packaging import version from sacrebleu import CHRF import datasets lowerCAmelCase_ = '''\ @inproceedings{popovic-2015-chrf, title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation", month = sep, year = "2015", address = "Lisbon, Portugal", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W15-3049", doi = "10.18653/v1/W15-3049", pages = "392--395", } @inproceedings{popovic-2017-chrf, title = "chr{F}++: words helping character n-grams", author = "Popovi{\'c}, Maja", booktitle = "Proceedings of the Second Conference on Machine Translation", month = sep, year = "2017", address = "Copenhagen, Denmark", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/W17-4770", doi = "10.18653/v1/W17-4770", pages = "612--618", } @inproceedings{post-2018-call, title = "A Call for Clarity in Reporting {BLEU} Scores", author = "Post, Matt", booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers", month = oct, year = "2018", address = "Belgium, Brussels", publisher = "Association for Computational Linguistics", url = "https://www.aclweb.org/anthology/W18-6319", pages = "186--191", } ''' lowerCAmelCase_ = '''\ ChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches, and ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation that is already present in sacrebleu. The implementation here is slightly different from sacrebleu in terms of the required input format. The length of the references and hypotheses lists need to be the same, so you may need to transpose your references compared to sacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534 See the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information. ''' lowerCAmelCase_ = ''' Produces ChrF(++) scores for hypotheses given reference translations. Args: predictions (list of str): The predicted sentences. references (list of list of str): The references. There should be one reference sub-list for each prediction sentence. char_order (int): Character n-gram order. Defaults to `6`. word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`. beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`. lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`. whitespace (bool): If `True`, include whitespaces when extracting character n-grams. eps_smoothing (bool): If `True`, applies epsilon smoothing similar to reference chrF++.py, NLTK and Moses implementations. If `False`, it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`. Returns: \'score\' (float): The chrF (chrF++) score, \'char_order\' (int): The character n-gram order, \'word_order\' (int): The word n-gram order. 
If equals to 2, the metric is referred to as chrF++, \'beta\' (int): Determine the importance of recall w.r.t precision Examples: Example 1--a simple example of calculating chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, references=reference) >>> print(results) {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2} Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2) >>> print(results) {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2} Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case: >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."] >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]] >>> chrf = datasets.load_metric("chrf") >>> results = chrf.compute(predictions=prediction, ... references=reference, ... word_order=2, ... 
lowercase=True) >>> print(results) {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2} ''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _snake_case ( datasets.Metric ): """simple docstring""" def _lowerCAmelCase ( self : List[Any]): """simple docstring""" if version.parse(scb.__version__) < version.parse("""1.4.12"""): raise ImportWarning( """To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n""" """You can install it with `pip install \"sacrebleu>=1.4.12\"`.""") return datasets.MetricInfo( description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/mjpost/sacreBLEU#chrf--chrf""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Value("""string""" , id="""sequence"""), """references""": datasets.Sequence(datasets.Value("""string""" , id="""sequence""") , id="""references"""), }) , codebase_urls=["""https://github.com/mjpost/sacreBLEU#chrf--chrf"""] , reference_urls=[ """https://github.com/m-popovic/chrF""", ] , ) def _lowerCAmelCase ( self : List[str] , _A : str , _A : List[str] , _A : int = CHRF.CHAR_ORDER , _A : int = CHRF.WORD_ORDER , _A : int = CHRF.BETA , _A : bool = False , _A : bool = False , _A : bool = False , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = len(references[0]) if any(len(_A) != references_per_prediction for refs in references): raise ValueError("""Sacrebleu requires the same number of references for each prediction""") _SCREAMING_SNAKE_CASE : Any = [[refs[i] for refs in references] for i in range(_A)] _SCREAMING_SNAKE_CASE : Union[str, Any] = CHRF(_A , _A , _A , _A , _A , _A) _SCREAMING_SNAKE_CASE : List[Any] = sb_chrf.corpus_score(_A , _A) return { "score": output.score, "char_order": output.char_order, "word_order": output.word_order, "beta": output.beta, }
"""simple docstring""" import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") _SCREAMING_SNAKE_CASE : str = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") model.to(_A) from datasets import load_dataset _SCREAMING_SNAKE_CASE : Any = load_dataset("""nielsr/rvlcdip-demo""") _SCREAMING_SNAKE_CASE : Any = dataset["""train"""][0]["""image"""].convert("""RGB""") _SCREAMING_SNAKE_CASE : str = image_processor(_A , return_tensors="""pt""").to(_A) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE : Any = model(**_A) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 1_6)) self.assertEqual(logits.shape , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [-0.4_158, -0.4_092, -0.4_347] , device=_A , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , _A , atol=1e-4))
"""simple docstring""" import argparse from collections import defaultdict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines() _SCREAMING_SNAKE_CASE : Optional[Any] = F"""class {class_name}(""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{4 * " "}def {test_name}(""" _SCREAMING_SNAKE_CASE : Tuple = F"""{8 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{16 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Tuple = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : Any = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = 0 _SCREAMING_SNAKE_CASE : Dict = [] for line in lines: if line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = True elif in_class and line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = True elif in_class and in_func and (line.startswith(__SCREAMING_SNAKE_CASE ) or line.startswith(__SCREAMING_SNAKE_CASE )): _SCREAMING_SNAKE_CASE : Dict = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _SCREAMING_SNAKE_CASE : int = True if in_class and in_func and in_line: if ")" not in line: continue else: _SCREAMING_SNAKE_CASE : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _SCREAMING_SNAKE_CASE : Optional[int] = False else: new_lines.append(__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , """w""" ) as f: for line in new_lines: f.write(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None )-> Optional[Any]: if fail is not None: with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = {l.strip() for l in f.readlines()} else: _SCREAMING_SNAKE_CASE : str = None with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : str = f.readlines() _SCREAMING_SNAKE_CASE : str = defaultdict(__SCREAMING_SNAKE_CASE ) for line in correct_lines: _SCREAMING_SNAKE_CASE : Optional[int] = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) lowerCAmelCase_ = parser.parse_args() main(args.correct_filename, args.fail_filename)
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class _snake_case ( __snake_case ): """simple docstring""" a = "M-CLIP" def __init__( self : Optional[Any] , _A : List[str]=1_0_2_4 , _A : Union[str, Any]=7_6_8 , **_A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = transformerDimSize _SCREAMING_SNAKE_CASE : List[str] = imageDimSize super().__init__(**_A) class _snake_case ( __snake_case ): """simple docstring""" a = MCLIPConfig def __init__( self : Dict , _A : Optional[Any] , *_A : Any , **_A : Dict): """simple docstring""" super().__init__(_A , *_A , **_A) _SCREAMING_SNAKE_CASE : Tuple = XLMRobertaModel(_A) _SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims) def _lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.transformer(input_ids=_A , attention_mask=_A)[0] _SCREAMING_SNAKE_CASE : Optional[Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None] return self.LinearTransformation(_A), embs
import json from typing import List, Optional, Tuple from tokenizers import normalizers from ...tokenization_utils_fast import PreTrainedTokenizerFast from ...utils import logging from .tokenization_funnel import FunnelTokenizer lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''} lowerCAmelCase_ = [ '''small''', '''small-base''', '''medium''', '''medium-base''', '''intermediate''', '''intermediate-base''', '''large''', '''large-base''', '''xlarge''', '''xlarge-base''', ] lowerCAmelCase_ = { '''vocab_file''': { '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/vocab.txt''', '''funnel-transformer/small-base''': '''https://huggingface.co/funnel-transformer/small-base/resolve/main/vocab.txt''', '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/vocab.txt''', '''funnel-transformer/medium-base''': ( '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/vocab.txt''' ), '''funnel-transformer/intermediate''': ( '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/vocab.txt''' ), '''funnel-transformer/intermediate-base''': ( '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/vocab.txt''' ), '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/vocab.txt''', '''funnel-transformer/large-base''': '''https://huggingface.co/funnel-transformer/large-base/resolve/main/vocab.txt''', '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/vocab.txt''', '''funnel-transformer/xlarge-base''': ( '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/vocab.txt''' ), }, '''tokenizer_file''': { '''funnel-transformer/small''': '''https://huggingface.co/funnel-transformer/small/resolve/main/tokenizer.json''', '''funnel-transformer/small-base''': ( '''https://huggingface.co/funnel-transformer/small-base/resolve/main/tokenizer.json''' ), '''funnel-transformer/medium''': '''https://huggingface.co/funnel-transformer/medium/resolve/main/tokenizer.json''', '''funnel-transformer/medium-base''': ( '''https://huggingface.co/funnel-transformer/medium-base/resolve/main/tokenizer.json''' ), '''funnel-transformer/intermediate''': ( '''https://huggingface.co/funnel-transformer/intermediate/resolve/main/tokenizer.json''' ), '''funnel-transformer/intermediate-base''': ( '''https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/tokenizer.json''' ), '''funnel-transformer/large''': '''https://huggingface.co/funnel-transformer/large/resolve/main/tokenizer.json''', '''funnel-transformer/large-base''': ( '''https://huggingface.co/funnel-transformer/large-base/resolve/main/tokenizer.json''' ), '''funnel-transformer/xlarge''': '''https://huggingface.co/funnel-transformer/xlarge/resolve/main/tokenizer.json''', '''funnel-transformer/xlarge-base''': ( '''https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/tokenizer.json''' ), }, } lowerCAmelCase_ = {F"funnel-transformer/{name}": 512 for name in _model_names} lowerCAmelCase_ = {F"funnel-transformer/{name}": {'''do_lower_case''': True} for name in _model_names} class _snake_case ( __snake_case ): """simple docstring""" a = VOCAB_FILES_NAMES a = PRETRAINED_VOCAB_FILES_MAP a = PRETRAINED_INIT_CONFIGURATION a = FunnelTokenizer a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a = 2 def __init__( self : int , _A : Union[str, Any]=None , 
_A : Any=None , _A : List[str]=True , _A : Optional[Any]="<unk>" , _A : Optional[int]="<sep>" , _A : int="<pad>" , _A : int="<cls>" , _A : Optional[Any]="<mask>" , _A : Tuple="<s>" , _A : Optional[int]="</s>" , _A : List[Any]=True , _A : Optional[Any]=True , _A : List[Any]=None , _A : List[Any]="##" , **_A : Optional[Any] , ): """simple docstring""" super().__init__( _A , tokenizer_file=_A , do_lower_case=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , bos_token=_A , eos_token=_A , clean_text=_A , tokenize_chinese_chars=_A , strip_accents=_A , wordpieces_prefix=_A , **_A , ) _SCREAMING_SNAKE_CASE : Dict = json.loads(self.backend_tokenizer.normalizer.__getstate__()) if ( normalizer_state.get("""lowercase""" , _A) != do_lower_case or normalizer_state.get("""strip_accents""" , _A) != strip_accents or normalizer_state.get("""handle_chinese_chars""" , _A) != tokenize_chinese_chars ): _SCREAMING_SNAKE_CASE : List[str] = getattr(_A , normalizer_state.pop("""type""")) _SCREAMING_SNAKE_CASE : Tuple = do_lower_case _SCREAMING_SNAKE_CASE : Optional[Any] = strip_accents _SCREAMING_SNAKE_CASE : Any = tokenize_chinese_chars _SCREAMING_SNAKE_CASE : Tuple = normalizer_class(**_A) _SCREAMING_SNAKE_CASE : Dict = do_lower_case def _lowerCAmelCase ( self : int , _A : Optional[int] , _A : List[str]=None): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id] if token_ids_a: output += token_ids_a + [self.sep_token_id] return output def _lowerCAmelCase ( self : Optional[int] , _A : List[int] , _A : Optional[List[int]] = None): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = [self.sep_token_id] _SCREAMING_SNAKE_CASE : Optional[int] = [self.cls_token_id] if token_ids_a is None: return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0] return len(cls) * [self.cls_token_type_id] + len(token_ids_a + sep) * [0] + len(token_ids_a + sep) * [1] def _lowerCAmelCase ( self : Tuple , _A : str , _A : Optional[str] = None): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = self._tokenizer.model.save(_A , name=_A) return tuple(_A)
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) _SCREAMING_SNAKE_CASE : int = precision _SCREAMING_SNAKE_CASE : Dict = ceil(precision / 14 ) _SCREAMING_SNAKE_CASE : int = 426_880 * Decimal(10_005 ).sqrt() _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 _SCREAMING_SNAKE_CASE : str = 13_591_409 _SCREAMING_SNAKE_CASE : Tuple = Decimal(__SCREAMING_SNAKE_CASE ) for k in range(1 , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = factorial(6 * k ) // (factorial(3 * k ) * factorial(__SCREAMING_SNAKE_CASE ) ** 3) linear_term += 545_140_134 exponential_term *= -262_537_412_640_768_000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": lowerCAmelCase_ = 50 print(F"The first {n} digits of pi is: {pi(n)}")
"""simple docstring""" from collections import defaultdict def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : int = 1 _SCREAMING_SNAKE_CASE : Any = True for v in tree[start]: if v not in visited: ret += dfs(__SCREAMING_SNAKE_CASE ) if ret % 2 == 0: cuts.append(__SCREAMING_SNAKE_CASE ) return ret def lowerCamelCase_()-> Dict: dfs(1 ) if __name__ == "__main__": lowerCAmelCase_ , lowerCAmelCase_ = 10, 9 lowerCAmelCase_ = defaultdict(list) lowerCAmelCase_ = {} lowerCAmelCase_ = [] lowerCAmelCase_ = 0 lowerCAmelCase_ = [(2, 1), (3, 1), (4, 3), (5, 2), (6, 1), (7, 2), (8, 6), (9, 8), (10, 8)] for u, v in edges: tree[u].append(v) tree[v].append(u) even_tree() print(len(cuts) - 1)
from __future__ import annotations

import random

# Maximum size of the population. Bigger could be faster but is more memory expensive.
N_POPULATION = 200
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
N_SELECTED = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
MUTATION_PROBABILITY = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1000))


def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # The fitness score counts position-wise matches against the target.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))


def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Slice both parents at a random point and swap the tails.
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)


def mutate(child: str, genes: list[str]) -> str:
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)


def select(
    parent_1: tuple[str, float],
    population_score: list[tuple[str, float]],
    genes: list[str],
) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new string to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop


def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f"{N_POPULATION} must be bigger than {N_SELECTED}"
        raise ValueError(msg)
    # Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f"{not_in_genes_list} is not in genes list, evolution cannot converge"
        raise ValueError(msg)

    # Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))

    # Just some logs to know what the algorithm is doing.
    generation, total_population = 0, 0

    # This loop will end when we find a perfect match for our target.
    while True:
        generation += 1
        total_population += len(population)

        # Random population created. Now it's time to evaluate.

        # Adding a bit of concurrency can make everything faster,
        #
        # import concurrent.futures
        # population_score: list[tuple[str, float]] = []
        # with concurrent.futures.ThreadPoolExecutor(
        #     max_workers=NUM_WORKERS) as executor:
        #     futures = {executor.submit(evaluate, item) for item in population}
        #     concurrent.futures.wait(futures)
        #     population_score = [item.result() for item in futures]
        #
        # but with a simple algorithm like this, it will probably be slower.
        # We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]

        # Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
        if population_score[0][0] == target:
            return (generation, total_population, population_score[0][0])

        # Print the best result every 10 generations,
        # just to know that the algorithm is working.
        if debug and generation % 10 == 0:
            print(
                f"\nGeneration: {generation}"
                f"\nTotal Population: {total_population}"
                f"\nBest score: {population_score[0][1]}"
                f"\nBest string: {population_score[0][0]}"
            )

        # Flush the old population, keeping some of the best evolutions.
        # Keeping this avoids regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)

        # Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]

        # This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
            # Check if the population has already reached the maximum value and if so,
            # break the cycle. If this check is disabled, the algorithm will take
            # forever to compute large strings, but will also calculate small strings
            # in far fewer generations.
            if len(population) > N_POPULATION:
                break


if __name__ == "__main__":
    target_str = (
        "This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"
    )
    genes_list = list(
        " ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"
        "nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"
    )
    generation, population, target = basic(target_str, genes_list)
    print(
        f"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
    )
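A quick sanity check of the fitness function, as a minimal sketch assuming the definitions above (function and argument names were restored from their call sites):

# Illustrative checks of evaluate(): the score counts position-wise matches.
assert evaluate("ban", "bat") == ("ban", 2.0)  # 'b' and 'a' match positionally
assert evaluate("bat", "bat") == ("bat", 3.0)  # a perfect match scores len(target)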
713
"""simple docstring""" from typing import Any import numpy as np def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: return np.array_equal(__SCREAMING_SNAKE_CASE , matrix.conjugate().T ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: _SCREAMING_SNAKE_CASE : Optional[int] = v.conjugate().T _SCREAMING_SNAKE_CASE : Optional[int] = v_star.dot(__SCREAMING_SNAKE_CASE ) assert isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) return (v_star_dot.dot(__SCREAMING_SNAKE_CASE )) / (v_star.dot(__SCREAMING_SNAKE_CASE )) def lowerCamelCase_()-> None: _SCREAMING_SNAKE_CASE : Optional[Any] = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) _SCREAMING_SNAKE_CASE : int = np.array([[1], [2], [3]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" print(rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : int = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" assert rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
635
0
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: _SCREAMING_SNAKE_CASE = 0 # if input_string is "aba" than new_input_string become "a|b|a" _SCREAMING_SNAKE_CASE = """""" _SCREAMING_SNAKE_CASE = """""" # append each character + "|" in new_string for range(0, length-1) for i in input_string[: len(__SCREAMING_SNAKE_CASE ) - 1]: new_input_string += i + "|" # append last character new_input_string += input_string[-1] # we will store the starting and ending of previous furthest ending palindromic # substring _SCREAMING_SNAKE_CASE = 0, 0 # length[i] shows the length of palindromic substring with center i _SCREAMING_SNAKE_CASE = [1 for i in range(len(__SCREAMING_SNAKE_CASE ) )] # for each character in new_string find corresponding palindromic string _SCREAMING_SNAKE_CASE = 0 for j in range(len(__SCREAMING_SNAKE_CASE ) ): _SCREAMING_SNAKE_CASE = 1 if j > r else min(length[l + r - j] // 2 , r - j + 1 ) while ( j - k >= 0 and j + k < len(__SCREAMING_SNAKE_CASE ) and new_input_string[k + j] == new_input_string[j - k] ): k += 1 _SCREAMING_SNAKE_CASE = 2 * k - 1 # does this string is ending after the previously explored end (that is r) ? # if yes the update the new r to the last index of this if j + k - 1 > r: _SCREAMING_SNAKE_CASE = j - k + 1 # noqa: E741 _SCREAMING_SNAKE_CASE = j + k - 1 # update max_length and start position if max_length < length[j]: _SCREAMING_SNAKE_CASE = length[j] _SCREAMING_SNAKE_CASE = j # create that string _SCREAMING_SNAKE_CASE = new_input_string[start - max_length // 2 : start + max_length // 2 + 1] for i in s: if i != "|": output_string += i return output_string if __name__ == "__main__": import doctest doctest.testmod()
714
"""simple docstring""" from __future__ import annotations def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> tuple: if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative in a semiconductor""" ) elif hole_conc < 0: raise ValueError("""Hole concentration cannot be negative in a semiconductor""" ) elif intrinsic_conc < 0: raise ValueError( """Intrinsic concentration cannot be negative in a semiconductor""" ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
635
0
"""simple docstring""" from collections import defaultdict from typing import Optional from ..image_utils import load_image from ..utils import ( add_end_docstrings, is_torch_available, logging, requires_backends, ) from .base import PIPELINE_INIT_ARGS, ChunkPipeline if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING lowerCAmelCase_ = logging.get_logger(__name__) @add_end_docstrings(__snake_case ) class _snake_case ( __snake_case ): """simple docstring""" def __init__( self : List[str] , **_A : Union[str, Any]): """simple docstring""" super().__init__(**_A) requires_backends(self , """vision""") requires_backends(self , """torch""") if self.framework != "pt": raise ValueError(f"""The {self.__class__} is only available in PyTorch.""") self.check_model_type(_A) def _lowerCAmelCase ( self : Tuple , **_A : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : Dict = {} _SCREAMING_SNAKE_CASE : Dict = {} # preprocess args if "points_per_batch" in kwargs: _SCREAMING_SNAKE_CASE : Optional[int] = kwargs["""points_per_batch"""] if "points_per_crop" in kwargs: _SCREAMING_SNAKE_CASE : List[Any] = kwargs["""points_per_crop"""] if "crops_n_layers" in kwargs: _SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs["""crops_n_layers"""] if "crop_overlap_ratio" in kwargs: _SCREAMING_SNAKE_CASE : List[str] = kwargs["""crop_overlap_ratio"""] if "crop_n_points_downscale_factor" in kwargs: _SCREAMING_SNAKE_CASE : List[str] = kwargs["""crop_n_points_downscale_factor"""] # postprocess args if "pred_iou_thresh" in kwargs: _SCREAMING_SNAKE_CASE : Any = kwargs["""pred_iou_thresh"""] if "stability_score_offset" in kwargs: _SCREAMING_SNAKE_CASE : Any = kwargs["""stability_score_offset"""] if "mask_threshold" in kwargs: _SCREAMING_SNAKE_CASE : int = kwargs["""mask_threshold"""] if "stability_score_thresh" in kwargs: _SCREAMING_SNAKE_CASE : str = kwargs["""stability_score_thresh"""] if "crops_nms_thresh" in kwargs: _SCREAMING_SNAKE_CASE : List[str] = kwargs["""crops_nms_thresh"""] if "output_rle_mask" in kwargs: _SCREAMING_SNAKE_CASE : int = kwargs["""output_rle_mask"""] if "output_bboxes_mask" in kwargs: _SCREAMING_SNAKE_CASE : Union[str, Any] = kwargs["""output_bboxes_mask"""] return preprocess_kwargs, forward_params, postprocess_kwargs def __call__( self : str , _A : Any , *_A : int , _A : Optional[Any]=None , _A : Optional[Any]=None , **_A : Union[str, Any]): """simple docstring""" return super().__call__(_A , *_A , num_workers=_A , batch_size=_A , **_A) def _lowerCAmelCase ( self : int , _A : int , _A : Union[str, Any]=6_4 , _A : int = 0 , _A : float = 5_1_2 / 1_5_0_0 , _A : Optional[int] = 3_2 , _A : Optional[int] = 1 , ): """simple docstring""" _SCREAMING_SNAKE_CASE : int = load_image(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor.size["""longest_edge"""] _SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor.generate_crop_boxes( _A , _A , _A , _A , _A , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = self.image_processor(images=_A , return_tensors="""pt""") with self.device_placement(): if self.framework == "pt": _SCREAMING_SNAKE_CASE : Dict = self.get_inference_context() with inference_context(): _SCREAMING_SNAKE_CASE : Dict = self._ensure_tensor_on_device(_A , device=self.device) _SCREAMING_SNAKE_CASE : Optional[int] = self.model.get_image_embeddings(model_inputs.pop("""pixel_values""")) _SCREAMING_SNAKE_CASE : List[Any] = image_embeddings _SCREAMING_SNAKE_CASE : Dict = 
grid_points.shape[1] _SCREAMING_SNAKE_CASE : int = points_per_batch if points_per_batch is not None else n_points if points_per_batch <= 0: raise ValueError( """Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. """ """To return all points at once, set points_per_batch to None""") for i in range(0 , _A , _A): _SCREAMING_SNAKE_CASE : List[Any] = grid_points[:, i : i + points_per_batch, :, :] _SCREAMING_SNAKE_CASE : Any = input_labels[:, i : i + points_per_batch] _SCREAMING_SNAKE_CASE : Any = i == n_points - points_per_batch yield { "input_points": batched_points, "input_labels": labels, "input_boxes": crop_boxes, "is_last": is_last, **model_inputs, } def _lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=0.88 , _A : List[str]=0.95 , _A : Union[str, Any]=0 , _A : Union[str, Any]=1 , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = model_inputs.pop("""input_boxes""") _SCREAMING_SNAKE_CASE : Any = model_inputs.pop("""is_last""") _SCREAMING_SNAKE_CASE : Optional[int] = model_inputs.pop("""original_sizes""").tolist() _SCREAMING_SNAKE_CASE : Optional[int] = model_inputs.pop("""reshaped_input_sizes""").tolist() _SCREAMING_SNAKE_CASE : str = self.model(**_A) # post processing happens here in order to avoid CPU GPU copies of ALL the masks _SCREAMING_SNAKE_CASE : Any = model_outputs["""pred_masks"""] _SCREAMING_SNAKE_CASE : List[str] = self.image_processor.post_process_masks( _A , _A , _A , _A , binarize=_A) _SCREAMING_SNAKE_CASE : Tuple = model_outputs["""iou_scores"""] _SCREAMING_SNAKE_CASE : Tuple = self.image_processor.filter_masks( masks[0] , iou_scores[0] , original_sizes[0] , input_boxes[0] , _A , _A , _A , _A , ) return { "masks": masks, "is_last": is_last, "boxes": boxes, "iou_scores": iou_scores, } def _lowerCAmelCase ( self : Tuple , _A : Tuple , _A : Tuple=False , _A : int=False , _A : Dict=0.7 , ): """simple docstring""" _SCREAMING_SNAKE_CASE : int = [] _SCREAMING_SNAKE_CASE : Union[str, Any] = [] _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for model_output in model_outputs: all_scores.append(model_output.pop("""iou_scores""")) all_masks.extend(model_output.pop("""masks""")) all_boxes.append(model_output.pop("""boxes""")) _SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(_A) _SCREAMING_SNAKE_CASE : Tuple = torch.cat(_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.post_process_for_mask_generation( _A , _A , _A , _A) _SCREAMING_SNAKE_CASE : List[str] = defaultdict(_A) for output in model_outputs: for k, v in output.items(): extra[k].append(_A) _SCREAMING_SNAKE_CASE : Dict = {} if output_rle_mask: _SCREAMING_SNAKE_CASE : Tuple = rle_mask if output_bboxes_mask: _SCREAMING_SNAKE_CASE : int = bounding_boxes return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
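For context, the chunk pipeline above backs the mask-generation task; a typical invocation looks roughly like the sketch below (the checkpoint id and image URL are illustrative placeholders, not mandated by this file):

from transformers import pipeline

# Hedged usage sketch of the automatic-mask-generation pipeline above.
generator = pipeline("mask-generation", model="facebook/sam-vit-base")
outputs = generator(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    points_per_batch=64,  # forwarded to preprocess(); batches the point grid
)
masks = outputs["masks"]  # one boolean mask per detected object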
715
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 )-> str: _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = DatasetDict( { """train""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """validation""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """test""": dataset["""validation"""], } ) def tokenize_function(__SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _SCREAMING_SNAKE_CASE : str = datasets.map( __SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _SCREAMING_SNAKE_CASE : Any = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. 
_SCREAMING_SNAKE_CASE : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _SCREAMING_SNAKE_CASE : Optional[Any] = 16 elif accelerator.mixed_precision != "no": _SCREAMING_SNAKE_CASE : Any = 8 else: _SCREAMING_SNAKE_CASE : Optional[int] = None return tokenizer.pad( __SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , ) # Instantiate dataloaders. _SCREAMING_SNAKE_CASE : int = DataLoader( tokenized_datasets["""train"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["""test"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader, test_dataloader def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: # New Code # _SCREAMING_SNAKE_CASE : Union[str, Any] = [] # Download the dataset _SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("""glue""" , """mrpc""" ) # Create our splits _SCREAMING_SNAKE_CASE : Dict = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator _SCREAMING_SNAKE_CASE : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _SCREAMING_SNAKE_CASE : Tuple = config["""lr"""] _SCREAMING_SNAKE_CASE : Tuple = int(config["""num_epochs"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""seed"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""batch_size"""] ) _SCREAMING_SNAKE_CASE : List[str] = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation _SCREAMING_SNAKE_CASE : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _SCREAMING_SNAKE_CASE : List[str] = batch_size // MAX_GPU_BATCH_SIZE _SCREAMING_SNAKE_CASE : List[str] = MAX_GPU_BATCH_SIZE set_seed(__SCREAMING_SNAKE_CASE ) # New Code # # Create our folds: _SCREAMING_SNAKE_CASE : List[str] = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] ) _SCREAMING_SNAKE_CASE : Optional[Any] = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = get_fold_dataloaders( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
_SCREAMING_SNAKE_CASE : Tuple = model.to(accelerator.device ) # Instantiate optimizer _SCREAMING_SNAKE_CASE : int = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE ) # Instantiate scheduler _SCREAMING_SNAKE_CASE : int = get_linear_schedule_with_warmup( optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(__SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = outputs.loss _SCREAMING_SNAKE_CASE : List[Any] = loss / gradient_accumulation_steps accelerator.backward(__SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , ) _SCREAMING_SNAKE_CASE : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , __SCREAMING_SNAKE_CASE ) # New Code # # We also run predictions on the test set at the very end _SCREAMING_SNAKE_CASE : str = [] for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. 
test_predictions.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: _SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) _SCREAMING_SNAKE_CASE : List[str] = torch.stack(__SCREAMING_SNAKE_CASE , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) _SCREAMING_SNAKE_CASE : int = metric.compute(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE ) accelerator.print("""Average test metrics from all folds:""" , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_()-> Optional[Any]: _SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) # New Code # parser.add_argument("""--num_folds""" , type=__SCREAMING_SNAKE_CASE , default=3 , help="""The number of splits to perform across the dataset""" ) _SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() _SCREAMING_SNAKE_CASE : Optional[int] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
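The final ensembling step above soft-votes across folds by summing the per-fold logits before taking the argmax; the same tensor arithmetic in miniature (toy shapes, random stand-in data):

import torch

# Three folds' logits for 8 test examples and 2 classes.
fold_logits = [torch.randn(8, 2) for _ in range(3)]
# Stack across folds, sum (equivalently average), then take the argmax.
ensembled = torch.stack(fold_logits, dim=0).sum(dim=0).div(3).argmax(dim=-1)
assert ensembled.shape == (8,)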
635
0
"""simple docstring""" from collections.abc import Iterator, MutableMapping from dataclasses import dataclass from typing import Generic, TypeVar lowerCAmelCase_ = TypeVar('''KEY''') lowerCAmelCase_ = TypeVar('''VAL''') @dataclass(frozen=__snake_case , slots=__snake_case ) class _snake_case ( Generic[KEY, VAL] ): """simple docstring""" a = 42 a = 42 class _snake_case ( _Item ): """simple docstring""" def __init__( self : Any): """simple docstring""" super().__init__(_A , _A) def __bool__( self : Any): """simple docstring""" return False lowerCAmelCase_ = _DeletedItem() class _snake_case ( MutableMapping[KEY, VAL] ): """simple docstring""" def __init__( self : Dict , _A : int = 8 , _A : float = 0.75): """simple docstring""" _SCREAMING_SNAKE_CASE : int = initial_block_size _SCREAMING_SNAKE_CASE : list[_Item | None] = [None] * initial_block_size assert 0.0 < capacity_factor < 1.0 _SCREAMING_SNAKE_CASE : Tuple = capacity_factor _SCREAMING_SNAKE_CASE : List[str] = 0 def _lowerCAmelCase ( self : Union[str, Any] , _A : KEY): """simple docstring""" return hash(_A) % len(self._buckets) def _lowerCAmelCase ( self : List[Any] , _A : int): """simple docstring""" return (ind + 1) % len(self._buckets) def _lowerCAmelCase ( self : List[str] , _A : int , _A : KEY , _A : VAL): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self._buckets[ind] if not stored: _SCREAMING_SNAKE_CASE : str = _Item(_A , _A) self._len += 1 return True elif stored.key == key: _SCREAMING_SNAKE_CASE : Union[str, Any] = _Item(_A , _A) return True else: return False def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = len(self._buckets) * self._capacity_factor return len(self) >= int(_A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" if len(self._buckets) <= self._initial_block_size: return False _SCREAMING_SNAKE_CASE : Optional[int] = len(self._buckets) * self._capacity_factor / 2 return len(self) < limit def _lowerCAmelCase ( self : Optional[Any] , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = self._buckets _SCREAMING_SNAKE_CASE : List[Any] = [None] * new_size _SCREAMING_SNAKE_CASE : Tuple = 0 for item in old_buckets: if item: self._add_item(item.key , item.val) def _lowerCAmelCase ( self : int): """simple docstring""" self._resize(len(self._buckets) * 2) def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" self._resize(len(self._buckets) // 2) def _lowerCAmelCase ( self : Optional[Any] , _A : KEY): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = self._get_bucket_index(_A) for _ in range(len(self._buckets)): yield ind _SCREAMING_SNAKE_CASE : Any = self._get_next_ind(_A) def _lowerCAmelCase ( self : List[Any] , _A : KEY , _A : VAL): """simple docstring""" for ind in self._iterate_buckets(_A): if self._try_set(_A , _A , _A): break def __setitem__( self : Tuple , _A : KEY , _A : VAL): """simple docstring""" if self._is_full(): self._size_up() self._add_item(_A , _A) def __delitem__( self : Optional[Any] , _A : KEY): """simple docstring""" for ind in self._iterate_buckets(_A): _SCREAMING_SNAKE_CASE : str = self._buckets[ind] if item is None: raise KeyError(_A) if item is _deleted: continue if item.key == key: _SCREAMING_SNAKE_CASE : int = _deleted self._len -= 1 break if self._is_sparse(): self._size_down() def __getitem__( self : Any , _A : KEY): """simple docstring""" for ind in self._iterate_buckets(_A): _SCREAMING_SNAKE_CASE : Optional[int] = self._buckets[ind] if item is None: break if item is _deleted: continue if item.key 
== key: return item.val raise KeyError(_A) def __len__( self : List[Any]): """simple docstring""" return self._len def __iter__( self : str): """simple docstring""" yield from (item.key for item in self._buckets if item) def __repr__( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = """ ,""".join( f"""{item.key}: {item.val}""" for item in self._buckets if item) return f"""HashMap({val_string})"""
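A short usage sketch of the open-addressing map above (the class name comes from its own __repr__; argument names are restored assumptions):

hm = HashMap(initial_block_size=8)
hm["a"] = 1   # inserts probe linearly from hash(key) % capacity
hm["b"] = 2
assert hm["a"] == 1 and len(hm) == 2
del hm["a"]           # tombstoned with the _deleted sentinel, not compacted
assert "a" not in hm  # MutableMapping derives __contains__ from __getitem__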
716
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
0
from maths.prime_check import is_prime


def twin_prime(number: int) -> int:
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if is_prime(number) and is_prime(number + 2):
        return number + 2
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()
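Illustrative checks, assuming the restored name above (a twin prime pair differs by exactly two):

assert twin_prime(3) == 5    # 3 and 5 are both prime
assert twin_prime(4) == -1   # 4 is not prime, so there is no pair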
717
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class _snake_case : """simple docstring""" def __init__( self : int , _A : List[Any] , _A : int , _A : int): """simple docstring""" if dst_width < 0 or dst_height < 0: raise ValueError("""Destination width/height should be > 0""") _SCREAMING_SNAKE_CASE : str = img _SCREAMING_SNAKE_CASE : Optional[Any] = img.shape[1] _SCREAMING_SNAKE_CASE : Tuple = img.shape[0] _SCREAMING_SNAKE_CASE : Any = dst_width _SCREAMING_SNAKE_CASE : Any = dst_height _SCREAMING_SNAKE_CASE : Any = self.src_w / self.dst_w _SCREAMING_SNAKE_CASE : Dict = self.src_h / self.dst_h _SCREAMING_SNAKE_CASE : Optional[Any] = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 2_5_5 ) def _lowerCAmelCase ( self : Tuple): """simple docstring""" for i in range(self.dst_h): for j in range(self.dst_w): _SCREAMING_SNAKE_CASE : Any = self.img[self.get_y(_A)][self.get_x(_A)] def _lowerCAmelCase ( self : int , _A : int): """simple docstring""" return int(self.ratio_x * x) def _lowerCAmelCase ( self : str , _A : int): """simple docstring""" return int(self.ratio_y * y) if __name__ == "__main__": lowerCAmelCase_ , lowerCAmelCase_ = 800, 600 lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1) lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output ) waitKey(0) destroyAllWindows()
635
0
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_upernet import UperNetConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
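_LazyModule defers the heavy submodule imports until an attribute is actually requested; a minimal sketch of that idea (not transformers' real implementation, just the __getattr__ trick it relies on):

import importlib
from types import ModuleType


class LazyModule(ModuleType):
    # Maps each exported name to the submodule that defines it; imports lazily.
    def __init__(self, name: str, import_structure: dict) -> None:
        super().__init__(name)
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr: str):
        module = importlib.import_module("." + self._class_to_module[attr], self.__name__)
        return getattr(module, attr)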
718
"""simple docstring""" import argparse from collections import defaultdict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines() _SCREAMING_SNAKE_CASE : Optional[Any] = F"""class {class_name}(""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{4 * " "}def {test_name}(""" _SCREAMING_SNAKE_CASE : Tuple = F"""{8 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{16 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Tuple = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : Any = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = 0 _SCREAMING_SNAKE_CASE : Dict = [] for line in lines: if line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = True elif in_class and line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = True elif in_class and in_func and (line.startswith(__SCREAMING_SNAKE_CASE ) or line.startswith(__SCREAMING_SNAKE_CASE )): _SCREAMING_SNAKE_CASE : Dict = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _SCREAMING_SNAKE_CASE : int = True if in_class and in_func and in_line: if ")" not in line: continue else: _SCREAMING_SNAKE_CASE : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _SCREAMING_SNAKE_CASE : Optional[int] = False else: new_lines.append(__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , """w""" ) as f: for line in new_lines: f.write(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None )-> Optional[Any]: if fail is not None: with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = {l.strip() for l in f.readlines()} else: _SCREAMING_SNAKE_CASE : str = None with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : str = f.readlines() _SCREAMING_SNAKE_CASE : str = defaultdict(__SCREAMING_SNAKE_CASE ) for line in correct_lines: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) lowerCAmelCase_ = parser.parse_args() main(args.correct_filename, args.fail_filename)
635
0
"""simple docstring""" import os from shutil import copyfile from typing import Any, Dict, List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = '''▁''' lowerCAmelCase_ = {'''vocab_file''': '''spiece.model'''} lowerCAmelCase_ = { '''vocab_file''': { '''google/reformer-crime-and-punishment''': ( '''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model''' ) } } lowerCAmelCase_ = { '''google/reformer-crime-and-punishment''': 524288, } class _snake_case ( __snake_case ): """simple docstring""" a = VOCAB_FILES_NAMES a = PRETRAINED_VOCAB_FILES_MAP a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES a = ["input_ids", "attention_mask"] def __init__( self : Optional[Any] , _A : int , _A : Any="</s>" , _A : Any="<unk>" , _A : Union[str, Any]=[] , _A : Optional[Dict[str, Any]] = None , **_A : List[Any] , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs super().__init__( eos_token=_A , unk_token=_A , additional_special_tokens=_A , sp_model_kwargs=self.sp_model_kwargs , **_A , ) _SCREAMING_SNAKE_CASE : Optional[Any] = vocab_file _SCREAMING_SNAKE_CASE : str = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(_A) @property def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" return self.sp_model.get_piece_size() def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = {self.convert_ids_to_tokens(_A): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.__dict__.copy() _SCREAMING_SNAKE_CASE : List[Any] = None return state def __setstate__( self : Dict , _A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = d # for backward compatibility if not hasattr(self , """sp_model_kwargs"""): _SCREAMING_SNAKE_CASE : int = {} _SCREAMING_SNAKE_CASE : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs) self.sp_model.Load(self.vocab_file) def _lowerCAmelCase ( self : Optional[int] , _A : str): """simple docstring""" return self.sp_model.encode(_A , out_type=_A) def _lowerCAmelCase ( self : Optional[Any] , _A : Optional[int]): """simple docstring""" return self.sp_model.piece_to_id(_A) def _lowerCAmelCase ( self : Any , _A : Union[str, Any]): """simple docstring""" if index < self.sp_model.get_piece_size(): _SCREAMING_SNAKE_CASE : str = self.sp_model.IdToPiece(_A) return token def _lowerCAmelCase ( self : Tuple , _A : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = [] _SCREAMING_SNAKE_CASE : Any = """""" for token in tokens: # make sure that special tokens are not decoded using sentencepiece model if token in self.all_special_tokens: out_string += self.sp_model.decode(_A) + token _SCREAMING_SNAKE_CASE : Optional[int] = [] else: current_sub_tokens.append(_A) out_string += self.sp_model.decode(_A) return out_string.strip() def _lowerCAmelCase ( self : Tuple , _A : str , _A : Optional[str] = None): """simple docstring""" if not os.path.isdir(_A): logger.error(f"""Vocabulary path ({save_directory}) should be a directory""") return _SCREAMING_SNAKE_CASE : Any = os.path.join( _A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""]) if os.path.abspath(self.vocab_file) != os.path.abspath(_A) and 
os.path.isfile(self.vocab_file): copyfile(self.vocab_file , _A) elif not os.path.isfile(self.vocab_file): with open(_A , """wb""") as fi: _SCREAMING_SNAKE_CASE : List[str] = self.sp_model.serialized_model_proto() fi.write(_A) return (out_vocab_file,)
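Underneath, the tokenizer above is a thin wrapper over a SentencePiece model; the core encode/decode round trip looks like this sketch (the model path is a placeholder for a real spiece.model file):

import sentencepiece as spm

sp = spm.SentencePieceProcessor()
sp.Load("spiece.model")  # hypothetical local path
pieces = sp.encode("Crime and Punishment", out_type=str)  # e.g. ['▁Crime', ...]
text = sp.decode(pieces)  # round-trips back to the original string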
719
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowerCAmelCase_ = { '''text_branch''': '''text_model''', '''audio_branch''': '''audio_model.audio_encoder''', '''attn''': '''attention.self''', '''self.proj''': '''output.dense''', '''attention.self_mask''': '''attn_mask''', '''mlp.fc1''': '''intermediate.dense''', '''mlp.fc2''': '''output.dense''', '''norm1''': '''layernorm_before''', '''norm2''': '''layernorm_after''', '''bn0''': '''batch_norm''', } lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''') def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = create_model( """HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : Optional[Any] = R""".*sequential.(\d+).*""" _SCREAMING_SNAKE_CASE : Any = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # replace sequential layers with list _SCREAMING_SNAKE_CASE : List[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) _SCREAMING_SNAKE_CASE : Dict = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.""" ) elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
_SCREAMING_SNAKE_CASE : Dict = 1 if projecton_layer == 0 else 2 _SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" ) if "audio" and "qkv" in key: # split qkv into query key and value _SCREAMING_SNAKE_CASE : Dict = value _SCREAMING_SNAKE_CASE : List[Any] = mixed_qkv.size(0 ) // 3 _SCREAMING_SNAKE_CASE : Optional[Any] = mixed_qkv[:qkv_dim] _SCREAMING_SNAKE_CASE : str = mixed_qkv[qkv_dim : qkv_dim * 2] _SCREAMING_SNAKE_CASE : Any = mixed_qkv[qkv_dim * 2 :] _SCREAMING_SNAKE_CASE : Dict = query_layer _SCREAMING_SNAKE_CASE : List[Any] = key_layer _SCREAMING_SNAKE_CASE : Dict = value_layer else: _SCREAMING_SNAKE_CASE : Optional[Any] = value return model_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> List[Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE ) clap_model.eval() _SCREAMING_SNAKE_CASE : Dict = clap_model.state_dict() _SCREAMING_SNAKE_CASE : Tuple = rename_state_dict(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = ClapConfig() _SCREAMING_SNAKE_CASE : Tuple = enable_fusion _SCREAMING_SNAKE_CASE : Dict = ClapModel(__SCREAMING_SNAKE_CASE ) # ignore the spectrogram embedding layer model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''') lowerCAmelCase_ = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
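The audio-branch conversion above slices a fused qkv projection into equal query/key/value thirds along the first dimension; the same split in miniature (toy shapes):

import torch

d = 4
mixed_qkv = torch.randn(3 * d, d)  # fused in-projection weight
qkv_dim = mixed_qkv.size(0) // 3
query = mixed_qkv[:qkv_dim]
key = mixed_qkv[qkv_dim : qkv_dim * 2]
value = mixed_qkv[qkv_dim * 2 :]
assert query.shape == key.shape == value.shape == (d, d)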
635
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_tokenizers_available, is_torch_available, ) lowerCAmelCase_ = { '''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''], '''tokenization_roformer''': ['''RoFormerTokenizer'''], } try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = ['''RoFormerTokenizerFast'''] try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''RoFormerForCausalLM''', '''RoFormerForMaskedLM''', '''RoFormerForMultipleChoice''', '''RoFormerForQuestionAnswering''', '''RoFormerForSequenceClassification''', '''RoFormerForTokenClassification''', '''RoFormerLayer''', '''RoFormerModel''', '''RoFormerPreTrainedModel''', '''load_tf_weights_in_roformer''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFRoFormerForCausalLM''', '''TFRoFormerForMaskedLM''', '''TFRoFormerForMultipleChoice''', '''TFRoFormerForQuestionAnswering''', '''TFRoFormerForSequenceClassification''', '''TFRoFormerForTokenClassification''', '''TFRoFormerLayer''', '''TFRoFormerModel''', '''TFRoFormerPreTrainedModel''', ] try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''', '''FlaxRoFormerForMaskedLM''', '''FlaxRoFormerForMultipleChoice''', '''FlaxRoFormerForQuestionAnswering''', '''FlaxRoFormerForSequenceClassification''', '''FlaxRoFormerForTokenClassification''', '''FlaxRoFormerModel''', '''FlaxRoFormerPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig from .tokenization_roformer import RoFormerTokenizer try: if not is_tokenizers_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .tokenization_roformer_fast import RoFormerTokenizerFast try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_roformer import ( ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, RoFormerForCausalLM, RoFormerForMaskedLM, RoFormerForMultipleChoice, RoFormerForQuestionAnswering, RoFormerForSequenceClassification, RoFormerForTokenClassification, RoFormerLayer, RoFormerModel, RoFormerPreTrainedModel, load_tf_weights_in_roformer, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_roformer import ( TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFRoFormerForCausalLM, TFRoFormerForMaskedLM, TFRoFormerForMultipleChoice, TFRoFormerForQuestionAnswering, TFRoFormerForSequenceClassification, TFRoFormerForTokenClassification, TFRoFormerLayer, TFRoFormerModel, TFRoFormerPreTrainedModel, ) try: if not is_flax_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_flax_roformer import ( FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, FlaxRoFormerForMaskedLM, FlaxRoFormerForMultipleChoice, 
FlaxRoFormerForQuestionAnswering, FlaxRoFormerForSequenceClassification, FlaxRoFormerForTokenClassification, FlaxRoFormerModel, FlaxRoFormerPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
720
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , ) assert hasattr(self , """env""") def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1): """simple docstring""" return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]): """simple docstring""" TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""") def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.create_estimator() # run training estimator.fit() # result dataframe _SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""]) _SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _SCREAMING_SNAKE_CASE : int = ( Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy) assert all(t <= self.results["""eval_loss"""] for t in eval_loss) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
635
0
"""simple docstring""" import unicodedata from dataclasses import dataclass from typing import Optional, Union import numpy as np from transformers.data.data_collator import DataCollatorMixin from transformers.file_utils import PaddingStrategy from transformers.tokenization_utils_base import PreTrainedTokenizerBase def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = np.full((len(__SCREAMING_SNAKE_CASE ), sequence_length, 2) , __SCREAMING_SNAKE_CASE ) else: _SCREAMING_SNAKE_CASE : Any = np.full((len(__SCREAMING_SNAKE_CASE ), sequence_length) , __SCREAMING_SNAKE_CASE ) for i, tensor in enumerate(__SCREAMING_SNAKE_CASE ): if padding_side == "right": if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = tensor[:sequence_length] else: _SCREAMING_SNAKE_CASE : int = tensor[:sequence_length] else: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = tensor[:sequence_length] else: _SCREAMING_SNAKE_CASE : List[str] = tensor[:sequence_length] return out_tensor.tolist() def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : List[str] = ord(__SCREAMING_SNAKE_CASE ) if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126): return True _SCREAMING_SNAKE_CASE : Tuple = unicodedata.category(__SCREAMING_SNAKE_CASE ) if cat.startswith("""P""" ): return True return False @dataclass class _snake_case ( __snake_case ): """simple docstring""" a = 42 a = True a = None a = None a = -1_00 a = "pt" def _lowerCAmelCase ( self : List[str] , _A : str): """simple docstring""" import torch _SCREAMING_SNAKE_CASE : List[str] = """label""" if """label""" in features[0].keys() else """labels""" _SCREAMING_SNAKE_CASE : Optional[Any] = [feature[label_name] for feature in features] if label_name in features[0].keys() else None _SCREAMING_SNAKE_CASE : List[str] = self.tokenizer.pad( _A , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="""pt""" if labels is None else None , ) if labels is None: return batch _SCREAMING_SNAKE_CASE : Tuple = torch.tensor(batch["""entity_ids"""]).shape[1] _SCREAMING_SNAKE_CASE : Tuple = self.tokenizer.padding_side if padding_side == "right": _SCREAMING_SNAKE_CASE : Union[str, Any] = [ list(_A) + [self.label_pad_token_id] * (sequence_length - len(_A)) for label in labels ] else: _SCREAMING_SNAKE_CASE : List[str] = [ [self.label_pad_token_id] * (sequence_length - len(_A)) + list(_A) for label in labels ] _SCREAMING_SNAKE_CASE : Optional[int] = [feature["""ner_tags"""] for feature in features] _SCREAMING_SNAKE_CASE : Tuple = padding_tensor(_A , -1 , _A , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = [feature["""original_entity_spans"""] for feature in features] _SCREAMING_SNAKE_CASE : Dict = padding_tensor(_A , (-1, -1) , _A , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = {k: torch.tensor(_A , dtype=torch.intaa) for k, v in batch.items()} return batch
721
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: return max(metric_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for gt in ground_truths ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Dict = [] if args.gold_data_mode == "qa": _SCREAMING_SNAKE_CASE : int = pd.read_csv(__SCREAMING_SNAKE_CASE , sep="""\t""" , header=__SCREAMING_SNAKE_CASE ) for answer_list in data[1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = ast.literal_eval(__SCREAMING_SNAKE_CASE ) answers.append(__SCREAMING_SNAKE_CASE ) else: _SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[int] = [[reference] for reference in references] _SCREAMING_SNAKE_CASE : Optional[int] = 0 for prediction, ground_truths in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): total += 1 em += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) fa += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = 1_00.0 * em / total _SCREAMING_SNAKE_CASE : Optional[Any] = 1_00.0 * fa / total logger.info(F"""F1: {fa:.2f}""" ) logger.info(F"""EM: {em:.2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = args.k _SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[Any] = 0 for hypo, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = set(hypo.split("""\t""" )[:k] ) _SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k _SCREAMING_SNAKE_CASE : int = 1_00.0 * em / total logger.info(F"""Precision@{k}: {em: .2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: def strip_title(__SCREAMING_SNAKE_CASE ): if title.startswith("""\"""" ): _SCREAMING_SNAKE_CASE : Optional[int] = title[1:] if title.endswith("""\"""" ): _SCREAMING_SNAKE_CASE : str = title[:-1] return title _SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , 
return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device ) _SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = question_enc_outputs[0] _SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever( __SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) _SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for docs in all_docs: _SCREAMING_SNAKE_CASE : str = [strip_title(__SCREAMING_SNAKE_CASE ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(__SCREAMING_SNAKE_CASE ) ) return provenance_strings def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.attention_mask.to(args.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) _SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) if args.print_predictions: for q, a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.info("""Q: {} - A: {}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) return answers def lowerCamelCase_()-> List[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__SCREAMING_SNAKE_CASE , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=__SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=__SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=__SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__SCREAMING_SNAKE_CASE , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""k for the 
precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=__SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=__SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=__SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=__SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) _SCREAMING_SNAKE_CASE : Dict = parser.parse_args() _SCREAMING_SNAKE_CASE : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : Union[str, Any] = {} if args.model_type is None: _SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration _SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs if args.index_name is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name if args.index_path is not None: _SCREAMING_SNAKE_CASE : Any = args.index_path else: _SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration _SCREAMING_SNAKE_CASE : int = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k _SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): 
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.retriever.init_retrieval() else: _SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: _SCREAMING_SNAKE_CASE : str = [] for line in tqdm(__SCREAMING_SNAKE_CASE ): questions.append(line.strip() ) if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size: _SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" ) preds_file.flush() _SCREAMING_SNAKE_CASE : Any = [] if len(__SCREAMING_SNAKE_CASE ) > 0: _SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) ) preds_file.flush() score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": lowerCAmelCase_ = get_args() main(args)
635
0
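The LUKE-style collator in the row above pads each example's label list to the batch's entity sequence length with the ignore index (-100) before converting to tensors. A minimal standalone sketch of just that padding step (plain Python, no transformers dependency; `pad_labels` is an illustrative name, not from the row):

def pad_labels(labels, sequence_length, padding_side="right", label_pad_token_id=-100):
    # Pad every label list to `sequence_length` so the batch can become one tensor.
    padded = []
    for label in labels:
        pad = [label_pad_token_id] * (sequence_length - len(label))
        padded.append(list(label) + pad if padding_side == "right" else pad + list(label))
    return padded

# Two examples with 2 and 3 labels, padded on the right to length 4:
assert pad_labels([[1, 2], [3, 4, 5]], 4) == [[1, 2, -100, -100], [3, 4, 5, -100]]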
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: _SCREAMING_SNAKE_CASE : List[str] = (1 + 24 * n) ** 0.5 return ((1 + root) / 6) % 1 == 0 def lowerCamelCase_(__SCREAMING_SNAKE_CASE = 5_000 )-> int: _SCREAMING_SNAKE_CASE : Any = [(i * (3 * i - 1)) // 2 for i in range(1 , __SCREAMING_SNAKE_CASE )] for i, pentagonal_i in enumerate(__SCREAMING_SNAKE_CASE ): for j in range(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) ): _SCREAMING_SNAKE_CASE : Dict = pentagonal_nums[j] _SCREAMING_SNAKE_CASE : int = pentagonal_i + pentagonal_j _SCREAMING_SNAKE_CASE : Optional[Any] = pentagonal_j - pentagonal_i if is_pentagonal(__SCREAMING_SNAKE_CASE ) and is_pentagonal(__SCREAMING_SNAKE_CASE ): return b return -1 if __name__ == "__main__": print(F"{solution() = }")
700
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowerCamelCase_(__SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1_026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , )-> Union[str, Any]: set_seed(3 ) # generate train_data and objective_set _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? _SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model _SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE ) print("""computing perplexity on objective set""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item() print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE ) # collect igf pairs and save to file demo.jbl collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , )-> Optional[int]: set_seed(42 ) # Load pre-trained model _SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model _SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE ) # Train secondary learner _SCREAMING_SNAKE_CASE : Any = train_secondary_learner( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1 _SCREAMING_SNAKE_CASE : List[Any] = 0 
_SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) model.train() if secondary_learner is not None: secondary_learner.to(__SCREAMING_SNAKE_CASE ) secondary_learner.eval() _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : Optional[int] = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = [] _SCREAMING_SNAKE_CASE : int = [] # Compute the performance of the transformer model at the beginning _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) for epoch in range(int(__SCREAMING_SNAKE_CASE ) ): for step, example in enumerate(__SCREAMING_SNAKE_CASE ): torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 ) _SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() _SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = True if secondary_learner is not None: _SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward( torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item() observed_qs.append(float(__SCREAMING_SNAKE_CASE ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: _SCREAMING_SNAKE_CASE : Dict = -1 if predicted_q < threshold: _SCREAMING_SNAKE_CASE : List[str] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. 
if examples == batch_size: torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=( """decay the selectivity of our secondary learner filter from""" """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner _SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner _SCREAMING_SNAKE_CASE : int = training_secondary_learner( __SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model _SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets( context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
635
0
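The pentagonal row above hinges on inverting P = n(3n - 1)/2: solving the quadratic for n gives n = (1 + sqrt(1 + 24P)) / 6, so P is pentagonal exactly when that expression is a positive integer. A quick self-check of the identity (mirrors the row's is_pentagonal test):

def is_pentagonal(n: int) -> bool:
    root = (1 + 24 * n) ** 0.5
    return ((1 + root) / 6) % 1 == 0

pentagonals = [k * (3 * k - 1) // 2 for k in range(1, 100)]
assert all(is_pentagonal(p) for p in pentagonals)
assert not is_pentagonal(pentagonals[10] + 1)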
"""simple docstring""" import os from typing import Optional import fsspec from fsspec.archive import AbstractArchiveFileSystem from fsspec.utils import DEFAULT_BLOCK_SIZE class _snake_case ( __snake_case ): """simple docstring""" a = "" a = ( None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz ) a = None # compression type in fsspec. ex: "gzip" a = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz def __init__( self : List[str] , _A : str = "" , _A : Optional[str] = None , _A : Optional[dict] = None , **_A : Any): """simple docstring""" super().__init__(self , **_A) # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode _SCREAMING_SNAKE_CASE : List[Any] = fsspec.open( _A , mode="""rb""" , protocol=_A , compression=self.compression , client_kwargs={ """requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459 """trust_env""": True, # Enable reading proxy env variables. **(target_options or {}).pop("""client_kwargs""" , {}), # To avoid issues if it was already passed. } , **(target_options or {}) , ) _SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.basename(self.file.path.split("""::""")[0]) _SCREAMING_SNAKE_CASE : Union[str, Any] = ( self.compressed_name[: self.compressed_name.rindex(""".""")] if """.""" in self.compressed_name else self.compressed_name ) _SCREAMING_SNAKE_CASE : Optional[int] = None @classmethod def _lowerCAmelCase ( cls : Any , _A : Tuple): """simple docstring""" return super()._strip_protocol(_A).lstrip("""/""") def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" if self.dir_cache is None: _SCREAMING_SNAKE_CASE : Union[str, Any] = {**self.file.fs.info(self.file.path), """name""": self.uncompressed_name} _SCREAMING_SNAKE_CASE : List[str] = {f["""name"""]: f} def _lowerCAmelCase ( self : Optional[int] , _A : str): """simple docstring""" return self.file.open().read() def _lowerCAmelCase ( self : str , _A : str , _A : str = "rb" , _A : Dict=None , _A : Tuple=True , _A : List[str]=None , **_A : Tuple , ): """simple docstring""" _SCREAMING_SNAKE_CASE : int = self._strip_protocol(_A) if mode != "rb": raise ValueError(f"""Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'""") return self.file.open() class _snake_case ( __snake_case ): """simple docstring""" a = "bz2" a = "bz2" a = ".bz2" class _snake_case ( __snake_case ): """simple docstring""" a = "gzip" a = "gzip" a = ".gz" class _snake_case ( __snake_case ): """simple docstring""" a = "lz4" a = "lz4" a = ".lz4" class _snake_case ( __snake_case ): """simple docstring""" a = "xz" a = "xz" a = ".xz" class _snake_case ( __snake_case ): """simple docstring""" a = "zstd" a = "zstd" a = ".zst" def __init__( self : int , _A : str , _A : str = "rb" , _A : Optional[str] = None , _A : Optional[dict] = None , _A : int = DEFAULT_BLOCK_SIZE , **_A : Dict , ): """simple docstring""" super().__init__( fo=_A , mode=_A , target_protocol=_A , target_options=_A , block_size=_A , **_A , ) # We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2: # # File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open # out.close = close # AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only # # see https://github.com/intake/filesystem_spec/issues/725 _SCREAMING_SNAKE_CASE : Optional[int] = self.file.__enter__ class _snake_case : 
"""simple docstring""" def __init__( self : Tuple , _A : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = file_ def __enter__( self : Optional[Any]): """simple docstring""" self._file.__enter__() return self def __exit__( self : Any , *_A : Tuple , **_A : Optional[int]): """simple docstring""" self._file.__exit__(*_A , **_A) def __iter__( self : int): """simple docstring""" return iter(self._file) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" return next(self._file) def __getattr__( self : Dict , _A : str): """simple docstring""" return getattr(self._file , _A) def fixed_enter(*_A : Tuple , **_A : Any): return WrappedFile(_enter(*_A , **_A)) _SCREAMING_SNAKE_CASE : Optional[Any] = fixed_enter
701
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( __snake_case ): """simple docstring""" a = ["image_processor", "tokenizer"] a = "ChineseCLIPImageProcessor" a = ("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _A , ) _SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""") _SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""") if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""") super().__init__(_A , _A) _SCREAMING_SNAKE_CASE : Dict = self.image_processor def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int): """simple docstring""" if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""") if text is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A) if images is not None: _SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A) if text is not None and images is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A) , tensor_type=_A) def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any): """simple docstring""" return self.tokenizer.batch_decode(*_A , **_A) def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any): """simple docstring""" return self.tokenizer.decode(*_A , **_A) @property def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , ) return self.image_processor_class
635
0
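The compression-filesystem row above wraps a single compressed file so fsspec can serve it as a one-file filesystem. Independently of that wrapper, stock fsspec can already decompress on read through its `compression` argument; a small local sketch (the file name is hypothetical):

import gzip
import fsspec

with gzip.open("example.txt.gz", "wt") as f:
    f.write("hello world")

with fsspec.open("example.txt.gz", "rt", compression="gzip") as f:
    assert f.read() == "hello world"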
"""simple docstring""" from collections.abc import Callable import numpy as np def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> np.ndarray: _SCREAMING_SNAKE_CASE : Tuple = int(np.ceil((x_end - xa) / step_size ) ) _SCREAMING_SNAKE_CASE : Optional[Any] = np.zeros((n + 1,) ) _SCREAMING_SNAKE_CASE : str = ya _SCREAMING_SNAKE_CASE : Optional[int] = xa for k in range(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = y[k] + step_size * ode_func(__SCREAMING_SNAKE_CASE , y[k] ) x += step_size return y if __name__ == "__main__": import doctest doctest.testmod()
702
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = ['''model.decoder.embed_positions.weights'''] def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: if "emb" in name: _SCREAMING_SNAKE_CASE : List[Any] = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: _SCREAMING_SNAKE_CASE : List[str] = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: _SCREAMING_SNAKE_CASE : int = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: _SCREAMING_SNAKE_CASE : Tuple = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: _SCREAMING_SNAKE_CASE : str = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple[Dict, Dict]: _SCREAMING_SNAKE_CASE : str = list(state_dict.keys() ) _SCREAMING_SNAKE_CASE : Tuple = {} for key in keys: _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = rename_keys(__SCREAMING_SNAKE_CASE ) if "in_proj_weight" in key: # split fused qkv proj _SCREAMING_SNAKE_CASE : str = val[:hidden_size, :] _SCREAMING_SNAKE_CASE : Any = val[hidden_size : 2 * hidden_size, :] _SCREAMING_SNAKE_CASE : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: _SCREAMING_SNAKE_CASE : int = val else: _SCREAMING_SNAKE_CASE : Dict = val return state_dict, enc_dec_proj_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> MusicgenDecoderConfig: if checkpoint == "small": # default config values _SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 _SCREAMING_SNAKE_CASE : str = 24 _SCREAMING_SNAKE_CASE : Any = 16 elif checkpoint == "medium": _SCREAMING_SNAKE_CASE : Dict = 1_536 _SCREAMING_SNAKE_CASE : Union[str, Any] = 48 _SCREAMING_SNAKE_CASE : Optional[Any] = 24 elif checkpoint == "large": _SCREAMING_SNAKE_CASE : List[Any] = 2_048 _SCREAMING_SNAKE_CASE : Optional[int] = 48 _SCREAMING_SNAKE_CASE : str = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = MusicgenDecoderConfig( hidden_size=__SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=__SCREAMING_SNAKE_CASE , 
num_attention_heads=__SCREAMING_SNAKE_CASE , ) return config @torch.no_grad() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="cpu" )-> str: _SCREAMING_SNAKE_CASE : str = MusicGen.get_pretrained(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = decoder_config_from_checkpoint(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = fairseq_model.lm.state_dict() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = rename_state_dict( __SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size ) _SCREAMING_SNAKE_CASE : Tuple = TaEncoderModel.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[Any] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) _SCREAMING_SNAKE_CASE : str = MusicgenForCausalLM(__SCREAMING_SNAKE_CASE ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model _SCREAMING_SNAKE_CASE : Dict = MusicgenForConditionalGeneration(text_encoder=__SCREAMING_SNAKE_CASE , audio_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__SCREAMING_SNAKE_CASE ) # check we can do a forward pass _SCREAMING_SNAKE_CASE : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) _SCREAMING_SNAKE_CASE : Dict = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE ).logits if logits.shape != (8, 1, 2_048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) _SCREAMING_SNAKE_CASE : Optional[int] = MusicgenProcessor(feature_extractor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE ) # set the appropriate bos/pad token ids _SCREAMING_SNAKE_CASE : Optional[Any] = 2_048 _SCREAMING_SNAKE_CASE : List[Any] = 2_048 # set other default generation config params _SCREAMING_SNAKE_CASE : Any = int(30 * audio_encoder.config.frame_rate ) _SCREAMING_SNAKE_CASE : Tuple = True _SCREAMING_SNAKE_CASE : int = 3.0 if pytorch_dump_folder is not None: Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(__SCREAMING_SNAKE_CASE ) processor.push_to_hub(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint''', 
default='''small''', type=str, help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''', ) parser.add_argument( '''--pytorch_dump_folder''', required=True, default=None, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) parser.add_argument( '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.''' ) lowerCAmelCase_ = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
635
0
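A usage sketch for the explicit-Euler row above: integrate y' = y from x = 0 to x = 1 with y(0) = 1 and compare the endpoint to e. The scheme is restated inline so the snippet runs on its own; with step size h, the endpoint is accurate to roughly e*h/2:

import numpy as np

def explicit_euler(ode_func, y0, x0, step_size, x_end):
    # Same update as the row above: y_{k+1} = y_k + h * f(x_k, y_k)
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y

y = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)
assert abs(y[-1] - np.e) < 0.01  # Euler error ~ e*h/2 ≈ 0.0014 here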
from unittest.mock import patch import pyspark from datasets.packaged_modules.spark.spark import ( Spark, SparkExamplesIterable, _generate_iterable_examples, ) from ..utils import ( require_dill_gt_0_3_2, require_not_windows, ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : Any = [] for part_id in partition_order: _SCREAMING_SNAKE_CASE : List[str] = df.where(F"""SPARK_PARTITION_ID() = {part_id}""" ).collect() for row_idx, row in enumerate(__SCREAMING_SNAKE_CASE ): expected_row_ids_and_row_dicts.append((F"""{part_id}_{row_idx}""", row.asDict()) ) return expected_row_ids_and_row_dicts @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Any = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() _SCREAMING_SNAKE_CASE : Union[str, Any] = spark.range(100 ).repartition(1 ) _SCREAMING_SNAKE_CASE : Optional[int] = Spark(__SCREAMING_SNAKE_CASE ) # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means # that each partition can hold 2 rows. spark_builder._repartition_df_if_needed(max_shard_size=16 ) # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions. assert spark_builder.df.rdd.getNumPartitions() == 50 @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase_()-> List[str]: _SCREAMING_SNAKE_CASE : Union[str, Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() _SCREAMING_SNAKE_CASE : List[Any] = spark.range(10 ).repartition(2 ) _SCREAMING_SNAKE_CASE : List[Any] = [1, 0] _SCREAMING_SNAKE_CASE : List[str] = _generate_iterable_examples(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Reverse the partitions. _SCREAMING_SNAKE_CASE : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for i, (row_id, row_dict) in enumerate(generate_fn() ): _SCREAMING_SNAKE_CASE : Union[str, Any] = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase_()-> Optional[int]: _SCREAMING_SNAKE_CASE : Union[str, Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() _SCREAMING_SNAKE_CASE : Union[str, Any] = spark.range(10 ).repartition(1 ) _SCREAMING_SNAKE_CASE : List[Any] = SparkExamplesIterable(__SCREAMING_SNAKE_CASE ) assert it.n_shards == 1 for i, (row_id, row_dict) in enumerate(__SCREAMING_SNAKE_CASE ): assert row_id == F"""0_{i}""" assert row_dict == {"id": i} @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Optional[Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() _SCREAMING_SNAKE_CASE : int = spark.range(30 ).repartition(3 ) # Mock the generator so that shuffle reverses the partition indices. 
with patch("""numpy.random.Generator""" ) as generator_mock: _SCREAMING_SNAKE_CASE : List[str] = lambda __SCREAMING_SNAKE_CASE : x.reverse() _SCREAMING_SNAKE_CASE : int = _get_expected_row_ids_and_row_dicts_for_partition_order(__SCREAMING_SNAKE_CASE , [2, 1, 0] ) _SCREAMING_SNAKE_CASE : Tuple = SparkExamplesIterable(__SCREAMING_SNAKE_CASE ).shuffle_data_sources(__SCREAMING_SNAKE_CASE ) assert shuffled_it.n_shards == 3 for i, (row_id, row_dict) in enumerate(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = expected_row_ids_and_row_dicts[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase_()-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[Any] = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() _SCREAMING_SNAKE_CASE : Any = spark.range(20 ).repartition(4 ) # Partitions 0 and 2 _SCREAMING_SNAKE_CASE : List[str] = SparkExamplesIterable(__SCREAMING_SNAKE_CASE ).shard_data_sources(worker_id=0 , num_workers=2 ) assert shard_it_a.n_shards == 2 _SCREAMING_SNAKE_CASE : Optional[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__SCREAMING_SNAKE_CASE , [0, 2] ) for i, (row_id, row_dict) in enumerate(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict # Partitions 1 and 3 _SCREAMING_SNAKE_CASE : List[Any] = SparkExamplesIterable(__SCREAMING_SNAKE_CASE ).shard_data_sources(worker_id=1 , num_workers=2 ) assert shard_it_a.n_shards == 2 _SCREAMING_SNAKE_CASE : Union[str, Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(__SCREAMING_SNAKE_CASE , [1, 3] ) for i, (row_id, row_dict) in enumerate(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = expected_row_ids_and_row_dicts_a[i] assert row_id == expected_row_id assert row_dict == expected_row_dict @require_not_windows @require_dill_gt_0_3_2 def lowerCamelCase_()-> Optional[int]: _SCREAMING_SNAKE_CASE : Any = pyspark.sql.SparkSession.builder.master("""local[*]""" ).appName("""pyspark""" ).getOrCreate() _SCREAMING_SNAKE_CASE : Optional[int] = spark.range(100 ).repartition(1 ) _SCREAMING_SNAKE_CASE : Any = Spark(__SCREAMING_SNAKE_CASE ) # Choose a small max_shard_size for maximum partitioning. spark_builder._repartition_df_if_needed(max_shard_size=1 ) # The new number of partitions should not be greater than the number of rows. assert spark_builder.df.rdd.getNumPartitions() == 100
703
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''', # See all SEW models at https://huggingface.co/models?filter=sew } class _snake_case ( __snake_case ): """simple docstring""" a = "sew" def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ): """simple docstring""" super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A) _SCREAMING_SNAKE_CASE : str = hidden_size _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation _SCREAMING_SNAKE_CASE : Dict = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : str = conv_bias _SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings _SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups _SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim) _SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers _SCREAMING_SNAKE_CASE : List[str] = intermediate_size _SCREAMING_SNAKE_CASE : str = squeeze_factor _SCREAMING_SNAKE_CASE : Dict = hidden_act _SCREAMING_SNAKE_CASE : str = num_attention_heads _SCREAMING_SNAKE_CASE : Dict = hidden_dropout _SCREAMING_SNAKE_CASE : Tuple = attention_dropout _SCREAMING_SNAKE_CASE : int = activation_dropout _SCREAMING_SNAKE_CASE : Any = feat_proj_dropout _SCREAMING_SNAKE_CASE : str = final_dropout _SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop _SCREAMING_SNAKE_CASE : Any = layer_norm_eps _SCREAMING_SNAKE_CASE : int = initializer_range _SCREAMING_SNAKE_CASE : List[Any] = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment _SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob _SCREAMING_SNAKE_CASE : List[str] = mask_time_length _SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks 
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob _SCREAMING_SNAKE_CASE : int = mask_feature_length _SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks # ctc loss _SCREAMING_SNAKE_CASE : int = ctc_loss_reduction _SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity # sequence classification _SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum _SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size @property def _lowerCAmelCase ( self : Any): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1)
635
0
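The Spark test row above builds its expected outputs by filtering rows on their physical partition. The primitive it relies on is Spark SQL's SPARK_PARTITION_ID(); a minimal sketch that lists each partition's rows (assumes a local pyspark installation):

import pyspark

spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
df = spark.range(10).repartition(2)
for part_id in range(df.rdd.getNumPartitions()):
    rows = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
    print(part_id, [row.asDict() for row in rows])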
"""simple docstring""" import heapq as hq import math from collections.abc import Iterator class _snake_case : """simple docstring""" def __init__( self : Tuple , _A : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = str(id_) _SCREAMING_SNAKE_CASE : Optional[int] = None _SCREAMING_SNAKE_CASE : Dict = None _SCREAMING_SNAKE_CASE : str = [] _SCREAMING_SNAKE_CASE : Any = {} # {vertex:distance} def __lt__( self : int , _A : str): """simple docstring""" return self.key < other.key def __repr__( self : Dict): """simple docstring""" return self.id def _lowerCAmelCase ( self : Optional[int] , _A : str): """simple docstring""" self.neighbors.append(_A) def _lowerCAmelCase ( self : Union[str, Any] , _A : Any , _A : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = weight def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: # add the neighbors: graph[a - 1].add_neighbor(graph[b - 1] ) graph[b - 1].add_neighbor(graph[a - 1] ) # add the edges: graph[a - 1].add_edge(graph[b - 1] , __SCREAMING_SNAKE_CASE ) graph[b - 1].add_edge(graph[a - 1] , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> list: _SCREAMING_SNAKE_CASE : str = [] for u in graph: _SCREAMING_SNAKE_CASE : int = math.inf _SCREAMING_SNAKE_CASE : Tuple = None _SCREAMING_SNAKE_CASE : List[Any] = 0 _SCREAMING_SNAKE_CASE : Union[str, Any] = graph[:] while q: _SCREAMING_SNAKE_CASE : Optional[int] = min(__SCREAMING_SNAKE_CASE ) q.remove(__SCREAMING_SNAKE_CASE ) for v in u.neighbors: if (v in q) and (u.edges[v.id] < v.key): _SCREAMING_SNAKE_CASE : Union[str, Any] = u _SCREAMING_SNAKE_CASE : Any = u.edges[v.id] for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ): a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) ) return a def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Iterator[tuple]: for u in graph: _SCREAMING_SNAKE_CASE : str = math.inf _SCREAMING_SNAKE_CASE : Dict = None _SCREAMING_SNAKE_CASE : Dict = 0 _SCREAMING_SNAKE_CASE : Tuple = list(__SCREAMING_SNAKE_CASE ) hq.heapify(__SCREAMING_SNAKE_CASE ) while h: _SCREAMING_SNAKE_CASE : Dict = hq.heappop(__SCREAMING_SNAKE_CASE ) for v in u.neighbors: if (v in h) and (u.edges[v.id] < v.key): _SCREAMING_SNAKE_CASE : int = u _SCREAMING_SNAKE_CASE : Optional[int] = u.edges[v.id] hq.heapify(__SCREAMING_SNAKE_CASE ) for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ): yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) def lowerCamelCase_()-> None: pass if __name__ == "__main__": import doctest doctest.testmod()
704
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''', '''UniSpeechForCTC''', '''UniSpeechForPreTraining''', '''UniSpeechForSequenceClassification''', '''UniSpeechModel''', '''UniSpeechPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
0
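A usage sketch for the Prim row above, on a 3-vertex triangle with edge weights 1, 2 and 3 (uses the Vertex, connect and prim names as restored in that row; the MST keeps the two cheapest edges, both attached to vertex 1):

graph = [Vertex(i) for i in range(3)]
connect(graph, 1, 2, 1)  # edge between vertices 1 and 2, weight 1
connect(graph, 1, 3, 2)  # edge between vertices 1 and 3, weight 2
connect(graph, 2, 3, 3)  # edge between vertices 2 and 3, weight 3
assert prim(graph, graph[0]) == [(2, 1), (3, 1)]  # each vertex paired with its MST parent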
"""simple docstring""" import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class _snake_case ( __snake_case ): """simple docstring""" a = "facebook/bart-large-mnli" a = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) a = "text_classifier" a = AutoTokenizer a = AutoModelForSequenceClassification a = ["text", ["text"]] a = ["text"] def _lowerCAmelCase ( self : int): """simple docstring""" super().setup() _SCREAMING_SNAKE_CASE : Any = self.model.config _SCREAMING_SNAKE_CASE : Any = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("""entail"""): _SCREAMING_SNAKE_CASE : List[Any] = int(_A) if self.entailment_id == -1: raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""") def _lowerCAmelCase ( self : Optional[Any] , _A : Tuple , _A : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = labels return self.pre_processor( [text] * len(_A) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , ) def _lowerCAmelCase ( self : Tuple , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = outputs.logits _SCREAMING_SNAKE_CASE : List[Any] = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
705
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : int = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : List[Any] = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str: if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = parquet_path elif 
issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] _SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=("train",) )-> Union[str, Any]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for split in splits: _SCREAMING_SNAKE_CASE : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: _SCREAMING_SNAKE_CASE : Optional[int] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[str] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : int = ParquetDatasetReader({"""train""": parquet_path} , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: if split: _SCREAMING_SNAKE_CASE : Union[str, Any] = {split: parquet_path} else: _SCREAMING_SNAKE_CASE : Optional[int] = """train""" _SCREAMING_SNAKE_CASE : Any = {"""train""": parquet_path, """test""": parquet_path} _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , 
cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / """foo.parquet""" ) _SCREAMING_SNAKE_CASE : str = pf.read() assert dataset.data.table == output_table def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Dict = str(shared_datadir / """test_image_rgb.jpg""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = {"""image""": [image_path]} _SCREAMING_SNAKE_CASE : Optional[Any] = Features({"""image""": Image()} ) _SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__SCREAMING_SNAKE_CASE ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: assert get_writer_batch_size(__SCREAMING_SNAKE_CASE ) == expected
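For orientation, here is a minimal sketch of the write/read round trip these tests exercise; `example.parquet` is a placeholder path, and the data mirrors the three-column fixture (string / int64 / float64) used throughout:

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict(
    {"col_1": ["0", "1", "2", "3"], "col_2": [0, 1, 2, 3], "col_3": [0.0, 1.0, 2.0, 3.0]}
)
assert ParquetDatasetWriter(ds, "example.parquet").write() > 0  # positive on success, as asserted above
reloaded = ParquetDatasetReader("example.parquet").read()
assert reloaded.column_names == ["col_1", "col_2", "col_3"]
assert reloaded.num_rows == 4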
635
0
"""simple docstring""" from collections.abc import Iterable from typing import Generic, TypeVar lowerCAmelCase_ = TypeVar('''_T''') class _snake_case ( Generic[_T] ): """simple docstring""" def __init__( self : Union[str, Any] , _A : Iterable[_T] | None = None): """simple docstring""" _SCREAMING_SNAKE_CASE : list[_T] = list(iterable or []) _SCREAMING_SNAKE_CASE : list[_T] = [] def __len__( self : List[Any]): """simple docstring""" return len(self._stacka) + len(self._stacka) def __repr__( self : Optional[Any]): """simple docstring""" return f"""Queue({tuple(self._stacka[::-1] + self._stacka)})""" def _lowerCAmelCase ( self : Dict , _A : _T): """simple docstring""" self._stacka.append(_A) def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self._stacka.pop _SCREAMING_SNAKE_CASE : List[str] = self._stacka.append if not self._stacka: while self._stacka: stacka_append(stacka_pop()) if not self._stacka: raise IndexError("""Queue is empty""") return self._stacka.pop() if __name__ == "__main__": from doctest import testmod testmod()
706
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""only integers accepted as input""" ) else: _SCREAMING_SNAKE_CASE : List[Any] = str(abs(__SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : List[str] = [list(__SCREAMING_SNAKE_CASE ) for char in range(len(__SCREAMING_SNAKE_CASE ) )] for index in range(len(__SCREAMING_SNAKE_CASE ) ): num_transpositions[index].pop(__SCREAMING_SNAKE_CASE ) return max( int("""""".join(list(__SCREAMING_SNAKE_CASE ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('''doctest''').testmod()
635
0
"""simple docstring""" import itertools import os from collections import Counter, defaultdict from concurrent.futures import ThreadPoolExecutor, as_completed import numpy as np import datasets from .execute import check_correctness lowerCAmelCase_ = '''\ @misc{chen2021evaluating, title={Evaluating Large Language Models Trained on Code}, author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \ and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \ and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \ and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \ and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \ and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \ and Mohammad Bavarian and Clemens Winter and Philippe Tillet \ and Felipe Petroski Such and Dave Cummings and Matthias Plappert \ and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \ and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \ and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \ and William Saunders and Christopher Hesse and Andrew N. Carr \ and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \ and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \ and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \ and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba}, year={2021}, eprint={2107.03374}, archivePrefix={arXiv}, primaryClass={cs.LG} } ''' lowerCAmelCase_ = '''\ This metric implements the evaluation harness for the HumanEval problem solving dataset described in the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). ''' lowerCAmelCase_ = ''' Calculates how good are predictions given some references, using certain scores Args: predictions: list of candidates to evaluate. Each candidates should be a list of strings with several code candidates to solve the problem. references: a list with a test for each prediction. Each test should evaluate the correctness of a code candidate. k: number of code candidates to consider in the evaluation (Default: [1, 10, 100]) num_workers: number of workers used to evaluate the canidate programs (Default: 4). timeout: Returns: pass_at_k: dict with pass rates for each k results: dict with granular results of each unittest Examples: >>> code_eval = datasets.load_metric("code_eval") >>> test_cases = ["assert add(2,3)==5"] >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]] >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2]) >>> print(pass_at_k) {\'pass@1\': 0.5, \'pass@2\': 1.0} ''' lowerCAmelCase_ = ''' ################################################################################ !!!WARNING!!! ################################################################################ The "code_eval" metric executes untrusted model-generated code in Python. Although it is highly unlikely that model-generated code will do something overtly malicious in response to this test suite, model-generated code may act destructively due to a lack of model capability or alignment. Users are strongly encouraged to sandbox this evaluation suite so that it does not perform destructive actions on their host or network. For more information on how OpenAI sandboxes its code, see the paper "Evaluating Large Language Models Trained on Code" (https://arxiv.org/abs/2107.03374). 
Once you have read this disclaimer and taken appropriate precautions, set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this with: >>> import os >>> os.environ["HF_ALLOW_CODE_EVAL"] = "1" ################################################################################\ ''' lowerCAmelCase_ = '''The MIT License Copyright (c) OpenAI (https://openai.com) Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.''' @datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION ) class _snake_case ( datasets.Metric ): """simple docstring""" def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" return datasets.MetricInfo( # This is the description that will appear on the metrics page. description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features( { """predictions""": datasets.Sequence(datasets.Value("""string""")), """references""": datasets.Value("""string"""), }) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , ) def _lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : List[Any] , _A : List[Any]=[1, 1_0, 1_0_0] , _A : Union[str, Any]=4 , _A : Optional[int]=3.0): """simple docstring""" if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0) != "1": raise ValueError(_WARNING) if os.name == "nt": raise NotImplementedError("""This metric is currently not supported on Windows.""") with ThreadPoolExecutor(max_workers=_A) as executor: _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : Optional[Any] = Counter() _SCREAMING_SNAKE_CASE : Union[str, Any] = 0 _SCREAMING_SNAKE_CASE : Tuple = defaultdict(_A) for task_id, (candidates, test_case) in enumerate(zip(_A , _A)): for candidate in candidates: _SCREAMING_SNAKE_CASE : List[str] = candidate + """\n""" + test_case _SCREAMING_SNAKE_CASE : Optional[int] = (test_program, timeout, task_id, completion_id[task_id]) _SCREAMING_SNAKE_CASE : List[str] = executor.submit(_A , *_A) futures.append(_A) completion_id[task_id] += 1 n_samples += 1 for future in as_completed(_A): _SCREAMING_SNAKE_CASE : int = future.result() results[result["task_id"]].append((result["""completion_id"""], result)) _SCREAMING_SNAKE_CASE : List[Any] = [], [] for result in results.values(): result.sort() _SCREAMING_SNAKE_CASE : List[str] = [r[1]["""passed"""] for r in result] total.append(len(_A)) correct.append(sum(_A)) _SCREAMING_SNAKE_CASE : List[str] = np.array(_A) 
_SCREAMING_SNAKE_CASE : List[str] = np.array(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = k _SCREAMING_SNAKE_CASE : Optional[int] = {f"""pass@{k}""": estimate_pass_at_k(_A , _A , _A).mean() for k in ks if (total >= k).all()} return pass_at_k, results def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: def estimator(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> float: if n - c < k: return 1.0 return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) ) if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[Any] = itertools.repeat(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) ) else: assert len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[Any] = iter(__SCREAMING_SNAKE_CASE ) return np.array([estimator(int(__SCREAMING_SNAKE_CASE ) , int(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ) for n, c in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )] )
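For reference, the estimator above computes the unbiased pass@k from the HumanEval paper, pass@k = 1 - C(n-c, k) / C(n, k), as a numerically stable product rather than with raw binomial coefficients. A self-contained check (the local pass_at_k mirrors the inner estimator for illustration):

import numpy as np
from math import comb

def pass_at_k(n: int, c: int, k: int) -> float:
    # n samples, c of them correct; probability that at least one of k draws passes.
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

assert abs(pass_at_k(10, 3, 1) - 0.3) < 1e-12  # pass@1 reduces to c / n
n, c, k = 10, 3, 4
assert abs(pass_at_k(n, c, k) - (1 - comb(n - c, k) / comb(n, k))) < 1e-12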
707
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Dict = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : str = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : List[Any] = -1 _SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0]) _SCREAMING_SNAKE_CASE : List[Any] = TextIteratorStreamer(_A) _SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[Any] = Thread(target=model.generate , kwargs=_A) thread.start() _SCREAMING_SNAKE_CASE : Any = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(_A , _A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : str = greedy_ids[:, input_ids.shape[1] :] _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(new_greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A , skip_prompt=_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : Optional[int] = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""distilgpt2""") _SCREAMING_SNAKE_CASE : Optional[Any] = 
AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(_A) _SCREAMING_SNAKE_CASE : int = -1 _SCREAMING_SNAKE_CASE : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Optional[int] = TextStreamer(_A , skip_special_tokens=_A) model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _SCREAMING_SNAKE_CASE : Optional[Any] = cs.out[:-1] # Remove the final "\n" _SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""pt""") self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1)) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Tuple = -1 _SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(_A , timeout=0.001) _SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[str] = Thread(target=model.generate , kwargs=_A) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_A): _SCREAMING_SNAKE_CASE : str = """""" for new_text in streamer: streamer_text += new_text
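The iterator-streamer pattern these tests exercise looks like this in application code. A minimal sketch reusing the tiny test checkpoint above; the prompt string and token budget are arbitrary:

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tok = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
inputs = tok("Hello", return_tensors="pt")
streamer = TextIteratorStreamer(tok, skip_prompt=True)
thread = Thread(
    target=model.generate,
    kwargs={**inputs, "max_new_tokens": 10, "do_sample": False, "streamer": streamer},
)
thread.start()
generated = "".join(streamer)  # iterating blocks until each new text chunk arrives
thread.join()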
635
0
"""simple docstring""" from collections import OrderedDict from typing import List, Mapping from packaging import version from ...configuration_utils import PretrainedConfig from ...onnx import OnnxConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''', } class _snake_case ( __snake_case ): """simple docstring""" a = "efficientnet" def __init__( self : int , _A : int = 3 , _A : int = 6_0_0 , _A : float = 2.0 , _A : float = 3.1 , _A : int = 8 , _A : List[int] = [3, 3, 5, 3, 5, 5, 3] , _A : List[int] = [3_2, 1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2] , _A : List[int] = [1_6, 2_4, 4_0, 8_0, 1_1_2, 1_9_2, 3_2_0] , _A : List[int] = [] , _A : List[int] = [1, 2, 2, 2, 1, 2, 1] , _A : List[int] = [1, 2, 2, 3, 3, 4, 1] , _A : List[int] = [1, 6, 6, 6, 6, 6, 6] , _A : float = 0.25 , _A : str = "swish" , _A : int = 2_5_6_0 , _A : str = "mean" , _A : float = 0.02 , _A : float = 0.001 , _A : float = 0.99 , _A : float = 0.5 , _A : float = 0.2 , **_A : List[str] , ): """simple docstring""" super().__init__(**_A) _SCREAMING_SNAKE_CASE : List[str] = num_channels _SCREAMING_SNAKE_CASE : Union[str, Any] = image_size _SCREAMING_SNAKE_CASE : Optional[Any] = width_coefficient _SCREAMING_SNAKE_CASE : Tuple = depth_coefficient _SCREAMING_SNAKE_CASE : Dict = depth_divisor _SCREAMING_SNAKE_CASE : Optional[Any] = kernel_sizes _SCREAMING_SNAKE_CASE : Dict = in_channels _SCREAMING_SNAKE_CASE : Any = out_channels _SCREAMING_SNAKE_CASE : Dict = depthwise_padding _SCREAMING_SNAKE_CASE : str = strides _SCREAMING_SNAKE_CASE : Dict = num_block_repeats _SCREAMING_SNAKE_CASE : Tuple = expand_ratios _SCREAMING_SNAKE_CASE : int = squeeze_expansion_ratio _SCREAMING_SNAKE_CASE : int = hidden_act _SCREAMING_SNAKE_CASE : int = hidden_dim _SCREAMING_SNAKE_CASE : Union[str, Any] = pooling_type _SCREAMING_SNAKE_CASE : int = initializer_range _SCREAMING_SNAKE_CASE : List[str] = batch_norm_eps _SCREAMING_SNAKE_CASE : Tuple = batch_norm_momentum _SCREAMING_SNAKE_CASE : List[str] = dropout_rate _SCREAMING_SNAKE_CASE : Optional[Any] = drop_connect_rate _SCREAMING_SNAKE_CASE : int = sum(_A) * 4 class _snake_case ( __snake_case ): """simple docstring""" a = version.parse("1.11" ) @property def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" return OrderedDict( [ ("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}), ]) @property def _lowerCAmelCase ( self : str): """simple docstring""" return 1e-5
708
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class _snake_case ( __snake_case ): """simple docstring""" a = "facebook/bart-large-mnli" a = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) a = "text_classifier" a = AutoTokenizer a = AutoModelForSequenceClassification a = ["text", ["text"]] a = ["text"] def _lowerCAmelCase ( self : int): """simple docstring""" super().setup() _SCREAMING_SNAKE_CASE : Any = self.model.config _SCREAMING_SNAKE_CASE : Any = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("""entail"""): _SCREAMING_SNAKE_CASE : List[Any] = int(_A) if self.entailment_id == -1: raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""") def _lowerCAmelCase ( self : Optional[Any] , _A : Tuple , _A : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = labels return self.pre_processor( [text] * len(_A) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , ) def _lowerCAmelCase ( self : Tuple , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = outputs.logits _SCREAMING_SNAKE_CASE : List[Any] = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
635
0
"""simple docstring""" from math import pow, sqrt def lowerCamelCase_(*__SCREAMING_SNAKE_CASE )-> bool: _SCREAMING_SNAKE_CASE : str = len(__SCREAMING_SNAKE_CASE ) > 0 and all(value > 0.0 for value in values ) return result def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> float | ValueError: return ( round(sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else ValueError("""Input Error: Molar mass values must greater than 0.""" ) ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> float | ValueError: return ( round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else ValueError( """Input Error: Molar mass and effusion rate values must greater than 0.""" ) ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> float | ValueError: return ( round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 ) if validate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else ValueError( """Input Error: Molar mass and effusion rate values must greater than 0.""" ) ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> float | ValueError: return ( round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 ) if validate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else ValueError( """Input Error: Molar mass and effusion rate values must greater than 0.""" ) ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> float | ValueError: return ( round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 ) if validate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else ValueError( """Input Error: Molar mass and effusion rate values must greater than 0.""" ) )
709
"""simple docstring""" import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") _SCREAMING_SNAKE_CASE : str = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") model.to(_A) from datasets import load_dataset _SCREAMING_SNAKE_CASE : Any = load_dataset("""nielsr/rvlcdip-demo""") _SCREAMING_SNAKE_CASE : Any = dataset["""train"""][0]["""image"""].convert("""RGB""") _SCREAMING_SNAKE_CASE : str = image_processor(_A , return_tensors="""pt""").to(_A) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE : Any = model(**_A) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 1_6)) self.assertEqual(logits.shape , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [-0.4_158, -0.4_092, -0.4_347] , device=_A , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , _A , atol=1e-4))
635
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available lowerCAmelCase_ = { '''configuration_tapas''': ['''TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TapasConfig'''], '''tokenization_tapas''': ['''TapasTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TapasForMaskedLM''', '''TapasForQuestionAnswering''', '''TapasForSequenceClassification''', '''TapasModel''', '''TapasPreTrainedModel''', '''load_tf_weights_in_tapas''', ] try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST''', '''TFTapasForMaskedLM''', '''TFTapasForQuestionAnswering''', '''TFTapasForSequenceClassification''', '''TFTapasModel''', '''TFTapasPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig from .tokenization_tapas import TapasTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasPreTrainedModel, load_tf_weights_in_tapas, ) try: if not is_tf_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_tf_tapas import ( TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TFTapasForMaskedLM, TFTapasForQuestionAnswering, TFTapasForSequenceClassification, TFTapasModel, TFTapasPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
710
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class _snake_case ( __snake_case ): """simple docstring""" a = "M-CLIP" def __init__( self : Optional[Any] , _A : List[str]=1_0_2_4 , _A : Union[str, Any]=7_6_8 , **_A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = transformerDimSize _SCREAMING_SNAKE_CASE : List[str] = imageDimSize super().__init__(**_A) class _snake_case ( __snake_case ): """simple docstring""" a = MCLIPConfig def __init__( self : Dict , _A : Optional[Any] , *_A : Any , **_A : Dict): """simple docstring""" super().__init__(_A , *_A , **_A) _SCREAMING_SNAKE_CASE : Tuple = XLMRobertaModel(_A) _SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims) def _lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.transformer(input_ids=_A , attention_mask=_A)[0] _SCREAMING_SNAKE_CASE : Optional[Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None] return self.LinearTransformation(_A), embs
635
0
import inspect import re from transformers.utils import direct_transformers_import # All paths are set with the intent you should run this script from the root of the repo with the command # python utils/check_config_docstrings.py lowerCAmelCase_ = '''src/transformers''' # This is to make sure the transformers module imported is the one in the repo. lowerCAmelCase_ = direct_transformers_import(PATH_TO_TRANSFORMERS) lowerCAmelCase_ = transformers.models.auto.configuration_auto.CONFIG_MAPPING # Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`. # For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)` lowerCAmelCase_ = re.compile(R'''\[(.+?)\]\((https://huggingface\.co/.+?)\)''') lowerCAmelCase_ = { '''DecisionTransformerConfig''', '''EncoderDecoderConfig''', '''MusicgenConfig''', '''RagConfig''', '''SpeechEncoderDecoderConfig''', '''TimmBackboneConfig''', '''VisionEncoderDecoderConfig''', '''VisionTextDualEncoderConfig''', '''LlamaConfig''', } def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Dict = None # source code of `config_class` _SCREAMING_SNAKE_CASE : str = inspect.getsource(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = _re_checkpoint.findall(__SCREAMING_SNAKE_CASE ) # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link. # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')` for ckpt_name, ckpt_link in checkpoints: # allow the link to end with `/` if ckpt_link.endswith("""/""" ): _SCREAMING_SNAKE_CASE : Optional[Any] = ckpt_link[:-1] # verify the checkpoint name corresponds to the checkpoint link _SCREAMING_SNAKE_CASE : Tuple = F"""https://huggingface.co/{ckpt_name}""" if ckpt_link == ckpt_link_from_name: _SCREAMING_SNAKE_CASE : int = ckpt_name break return checkpoint def lowerCamelCase_()-> List[str]: _SCREAMING_SNAKE_CASE : str = [] for config_class in list(CONFIG_MAPPING.values() ): # Skip deprecated models if "models.deprecated" in config_class.__module__: continue _SCREAMING_SNAKE_CASE : int = get_checkpoint_from_config_class(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = config_class.__name__ if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK: configs_without_checkpoint.append(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: _SCREAMING_SNAKE_CASE : List[Any] = """\n""".join(sorted(__SCREAMING_SNAKE_CASE ) ) raise ValueError(F"""The following configurations don't contain any valid checkpoint:\n{message}""" ) if __name__ == "__main__": check_config_docstrings_have_checkpoints()
711
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) _SCREAMING_SNAKE_CASE : int = precision _SCREAMING_SNAKE_CASE : Dict = ceil(precision / 14 ) _SCREAMING_SNAKE_CASE : int = 426_880 * Decimal(10_005 ).sqrt() _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 _SCREAMING_SNAKE_CASE : str = 13_591_409 _SCREAMING_SNAKE_CASE : Tuple = Decimal(__SCREAMING_SNAKE_CASE ) for k in range(1 , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = factorial(6 * k ) // (factorial(3 * k ) * factorial(__SCREAMING_SNAKE_CASE ) ** 3) linear_term += 545_140_134 exponential_term *= -262_537_412_640_768_000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": lowerCAmelCase_ = 50 print(F"The first {n} digits of pi is: {pi(n)}")
635
0
"""simple docstring""" from functools import lru_cache def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> set: _SCREAMING_SNAKE_CASE : int = 2 _SCREAMING_SNAKE_CASE : Union[str, Any] = set() while i * i <= n: if n % i: i += 1 else: n //= i factors.add(__SCREAMING_SNAKE_CASE ) if n > 1: factors.add(__SCREAMING_SNAKE_CASE ) return factors @lru_cache def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: return len(unique_prime_factors(__SCREAMING_SNAKE_CASE ) ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: return len(set(__SCREAMING_SNAKE_CASE ) ) in (0, 1) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> list: _SCREAMING_SNAKE_CASE : Tuple = 2 while True: # Increment each value of a generated range _SCREAMING_SNAKE_CASE : Any = [base + i for i in range(__SCREAMING_SNAKE_CASE )] # Run elements through out unique_prime_factors function # Append our target number to the end. _SCREAMING_SNAKE_CASE : List[str] = [upf_len(__SCREAMING_SNAKE_CASE ) for x in group] checker.append(__SCREAMING_SNAKE_CASE ) # If all numbers in the list are equal, return the group variable. if equality(__SCREAMING_SNAKE_CASE ): return group # Increment our base variable by 1 base += 1 def lowerCamelCase_(__SCREAMING_SNAKE_CASE = 4 )-> int: _SCREAMING_SNAKE_CASE : Tuple = run(__SCREAMING_SNAKE_CASE ) return results[0] if len(__SCREAMING_SNAKE_CASE ) else None if __name__ == "__main__": print(solution())
712
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _SCREAMING_SNAKE_CASE : Optional[int] = TapasConfig.from_json_file(__SCREAMING_SNAKE_CASE ) # set absolute/relative position embeddings parameter _SCREAMING_SNAKE_CASE : Dict = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _SCREAMING_SNAKE_CASE : str = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WTQ": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : Optional[int] = 4 _SCREAMING_SNAKE_CASE : Any = True # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 0.66_46_94 _SCREAMING_SNAKE_CASE : str = 0.20_79_51 _SCREAMING_SNAKE_CASE : str = 0.12_11_94 _SCREAMING_SNAKE_CASE : List[Any] = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = 0.0_35_25_13 _SCREAMING_SNAKE_CASE : Optional[Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : int = 4 _SCREAMING_SNAKE_CASE : Tuple = False # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 36.45_19 _SCREAMING_SNAKE_CASE : Union[str, Any] = 0.90_34_21 _SCREAMING_SNAKE_CASE : Optional[Any] = 2_22.0_88 _SCREAMING_SNAKE_CASE : Any = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Optional[int] = True _SCREAMING_SNAKE_CASE : Dict = 0.76_31_41 _SCREAMING_SNAKE_CASE : Union[str, Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "TABFACT": _SCREAMING_SNAKE_CASE : int = TapasForSequenceClassification(config=__SCREAMING_SNAKE_CASE ) elif task == "MLM": _SCREAMING_SNAKE_CASE : int = TapasForMaskedLM(config=__SCREAMING_SNAKE_CASE ) elif task == "INTERMEDIATE_PRETRAINING": _SCREAMING_SNAKE_CASE : int = TapasModel(config=__SCREAMING_SNAKE_CASE ) else: raise ValueError(F"""Task {task} not supported.""" ) print(F"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save pytorch-model (weights and configuration) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Save tokenizer files print(F"""Save tokenizer files to {pytorch_dump_path}""" ) _SCREAMING_SNAKE_CASE : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 ) tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. 
Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. Defaults to False.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
635
0
def hamming_distance(string1: str, string2: str) -> int:
    if len(string1) != len(string2):
        raise ValueError("String lengths must match!")
    count = 0
    for char1, char2 in zip(string1, string2):
        if char1 != char2:
            count += 1
    return count


if __name__ == "__main__":
    import doctest

    doctest.testmod()
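The classic worked example: "karolin" and "kathrin" differ in exactly three positions:

assert hamming_distance("karolin", "kathrin") == 3
assert hamming_distance("00000", "00000") == 0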
713
"""simple docstring""" from typing import Any import numpy as np def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: return np.array_equal(__SCREAMING_SNAKE_CASE , matrix.conjugate().T ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: _SCREAMING_SNAKE_CASE : Optional[int] = v.conjugate().T _SCREAMING_SNAKE_CASE : Optional[int] = v_star.dot(__SCREAMING_SNAKE_CASE ) assert isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) return (v_star_dot.dot(__SCREAMING_SNAKE_CASE )) / (v_star.dot(__SCREAMING_SNAKE_CASE )) def lowerCamelCase_()-> None: _SCREAMING_SNAKE_CASE : Optional[Any] = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) _SCREAMING_SNAKE_CASE : int = np.array([[1], [2], [3]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" print(rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : int = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" assert rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
635
0
"""simple docstring""" import unittest from transformers import ( MODEL_FOR_OBJECT_DETECTION_MAPPING, AutoFeatureExtractor, AutoModelForObjectDetection, ObjectDetectionPipeline, is_vision_available, pipeline, ) from transformers.testing_utils import ( is_pipeline_test, nested_simplify, require_pytesseract, require_tf, require_timm, require_torch, require_vision, slow, ) from .test_pipelines_common import ANY if is_vision_available(): from PIL import Image else: class _snake_case : """simple docstring""" @staticmethod def _lowerCAmelCase ( *_A : Union[str, Any] , **_A : List[Any]): """simple docstring""" pass @is_pipeline_test @require_vision @require_timm @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" a = MODEL_FOR_OBJECT_DETECTION_MAPPING def _lowerCAmelCase ( self : Union[str, Any] , _A : Optional[int] , _A : Any , _A : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=_A , image_processor=_A) return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"] def _lowerCAmelCase ( self : List[str] , _A : List[str] , _A : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE = object_detector("""./tests/fixtures/tests_samples/COCO/000000039769.png""" , threshold=0.0) self.assertGreater(len(_A) , 0) for detected_object in outputs: self.assertEqual( _A , { """score""": ANY(_A), """label""": ANY(_A), """box""": {"""xmin""": ANY(_A), """ymin""": ANY(_A), """xmax""": ANY(_A), """ymax""": ANY(_A)}, } , ) import datasets _SCREAMING_SNAKE_CASE = datasets.load_dataset("""hf-internal-testing/fixtures_image_utils""" , """image""" , split="""test""") _SCREAMING_SNAKE_CASE = [ Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png"""), """http://images.cocodataset.org/val2017/000000039769.jpg""", # RGBA dataset[0]["""file"""], # LA dataset[1]["""file"""], # L dataset[2]["""file"""], ] _SCREAMING_SNAKE_CASE = object_detector(_A , threshold=0.0) self.assertEqual(len(_A) , len(_A)) for outputs in batch_outputs: self.assertGreater(len(_A) , 0) for detected_object in outputs: self.assertEqual( _A , { """score""": ANY(_A), """label""": ANY(_A), """box""": {"""xmin""": ANY(_A), """ymin""": ANY(_A), """xmax""": ANY(_A), """ymax""": ANY(_A)}, } , ) @require_tf @unittest.skip("""Object detection not implemented in TF""") def _lowerCAmelCase ( self : Dict): """simple docstring""" pass @require_torch def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE = """hf-internal-testing/tiny-detr-mobilenetsv3""" _SCREAMING_SNAKE_CASE = AutoModelForObjectDetection.from_pretrained(_A) _SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(_A) _SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=_A , feature_extractor=_A) _SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=0.0) self.assertEqual( nested_simplify(_A , decimals=4) , [ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, ] , ) _SCREAMING_SNAKE_CASE = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ] , threshold=0.0 , ) self.assertEqual( nested_simplify(_A , decimals=4) , [ [ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 
1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, ], [ {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, {"""score""": 0.3_376, """label""": """LABEL_0""", """box""": {"""xmin""": 1_5_9, """ymin""": 1_2_0, """xmax""": 4_8_0, """ymax""": 3_5_9}}, ], ] , ) @require_torch @slow def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE = """facebook/detr-resnet-50""" _SCREAMING_SNAKE_CASE = AutoModelForObjectDetection.from_pretrained(_A) _SCREAMING_SNAKE_CASE = AutoFeatureExtractor.from_pretrained(_A) _SCREAMING_SNAKE_CASE = ObjectDetectionPipeline(model=_A , feature_extractor=_A) _SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""") self.assertEqual( nested_simplify(_A , decimals=4) , [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ] , ) _SCREAMING_SNAKE_CASE = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ]) self.assertEqual( nested_simplify(_A , decimals=4) , [ [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ], [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ], ] , ) @require_torch @slow def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE = """facebook/detr-resnet-50""" _SCREAMING_SNAKE_CASE = pipeline("""object-detection""" , model=_A) _SCREAMING_SNAKE_CASE = 
object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""") self.assertEqual( nested_simplify(_A , decimals=4) , [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ] , ) _SCREAMING_SNAKE_CASE = object_detector( [ """http://images.cocodataset.org/val2017/000000039769.jpg""", """http://images.cocodataset.org/val2017/000000039769.jpg""", ]) self.assertEqual( nested_simplify(_A , decimals=4) , [ [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ], [ {"""score""": 0.9_982, """label""": """remote""", """box""": {"""xmin""": 4_0, """ymin""": 7_0, """xmax""": 1_7_5, """ymax""": 1_1_7}}, {"""score""": 0.9_960, """label""": """remote""", """box""": {"""xmin""": 3_3_3, """ymin""": 7_2, """xmax""": 3_6_8, """ymax""": 1_8_7}}, {"""score""": 0.9_955, """label""": """couch""", """box""": {"""xmin""": 0, """ymin""": 1, """xmax""": 6_3_9, """ymax""": 4_7_3}}, {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ], ] , ) @require_torch @slow def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE = 0.9_985 _SCREAMING_SNAKE_CASE = """facebook/detr-resnet-50""" _SCREAMING_SNAKE_CASE = pipeline("""object-detection""" , model=_A) _SCREAMING_SNAKE_CASE = object_detector("""http://images.cocodataset.org/val2017/000000039769.jpg""" , threshold=_A) self.assertEqual( nested_simplify(_A , decimals=4) , [ {"""score""": 0.9_988, """label""": """cat""", """box""": {"""xmin""": 1_3, """ymin""": 5_2, """xmax""": 3_1_4, """ymax""": 4_7_0}}, {"""score""": 0.9_987, """label""": """cat""", """box""": {"""xmin""": 3_4_5, """ymin""": 2_3, """xmax""": 6_4_0, """ymax""": 3_6_8}}, ] , ) @require_torch @require_pytesseract @slow def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE = """Narsil/layoutlmv3-finetuned-funsd""" _SCREAMING_SNAKE_CASE = 0.9_993 _SCREAMING_SNAKE_CASE = pipeline("""object-detection""" , model=_A , threshold=_A) _SCREAMING_SNAKE_CASE = object_detector( """https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png""") self.assertEqual( nested_simplify(_A , 
decimals=4) , [ {"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 2_9_4, """ymin""": 2_5_4, """xmax""": 3_4_3, """ymax""": 2_6_4}}, {"""score""": 0.9_993, """label""": """I-ANSWER""", """box""": {"""xmin""": 2_9_4, """ymin""": 2_5_4, """xmax""": 3_4_3, """ymax""": 2_6_4}}, ] , )
714
"""simple docstring""" from __future__ import annotations def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> tuple: if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative in a semiconductor""" ) elif hole_conc < 0: raise ValueError("""Hole concentration cannot be negative in a semiconductor""" ) elif intrinsic_conc < 0: raise ValueError( """Intrinsic concentration cannot be negative in a semiconductor""" ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
635
0
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''', '''LiltForQuestionAnswering''', '''LiltForSequenceClassification''', '''LiltForTokenClassification''', '''LiltModel''', '''LiltPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_lilt import ( LILT_PRETRAINED_MODEL_ARCHIVE_LIST, LiltForQuestionAnswering, LiltForSequenceClassification, LiltForTokenClassification, LiltModel, LiltPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
715
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 )-> str: _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = DatasetDict( { """train""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """validation""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """test""": dataset["""validation"""], } ) def tokenize_function(__SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _SCREAMING_SNAKE_CASE : str = datasets.map( __SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _SCREAMING_SNAKE_CASE : Any = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. 
_SCREAMING_SNAKE_CASE : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _SCREAMING_SNAKE_CASE : Optional[Any] = 16 elif accelerator.mixed_precision != "no": _SCREAMING_SNAKE_CASE : Any = 8 else: _SCREAMING_SNAKE_CASE : Optional[int] = None return tokenizer.pad( __SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , ) # Instantiate dataloaders. _SCREAMING_SNAKE_CASE : int = DataLoader( tokenized_datasets["""train"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["""test"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader, test_dataloader def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: # New Code # _SCREAMING_SNAKE_CASE : Union[str, Any] = [] # Download the dataset _SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("""glue""" , """mrpc""" ) # Create our splits _SCREAMING_SNAKE_CASE : Dict = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator _SCREAMING_SNAKE_CASE : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _SCREAMING_SNAKE_CASE : Tuple = config["""lr"""] _SCREAMING_SNAKE_CASE : Tuple = int(config["""num_epochs"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""seed"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""batch_size"""] ) _SCREAMING_SNAKE_CASE : List[str] = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation _SCREAMING_SNAKE_CASE : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _SCREAMING_SNAKE_CASE : List[str] = batch_size // MAX_GPU_BATCH_SIZE _SCREAMING_SNAKE_CASE : List[str] = MAX_GPU_BATCH_SIZE set_seed(__SCREAMING_SNAKE_CASE ) # New Code # # Create our folds: _SCREAMING_SNAKE_CASE : List[str] = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] ) _SCREAMING_SNAKE_CASE : Optional[Any] = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = get_fold_dataloaders( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
_SCREAMING_SNAKE_CASE : Tuple = model.to(accelerator.device ) # Instantiate optimizer _SCREAMING_SNAKE_CASE : int = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE ) # Instantiate scheduler _SCREAMING_SNAKE_CASE : int = get_linear_schedule_with_warmup( optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(__SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = outputs.loss _SCREAMING_SNAKE_CASE : List[Any] = loss / gradient_accumulation_steps accelerator.backward(__SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , ) _SCREAMING_SNAKE_CASE : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , __SCREAMING_SNAKE_CASE ) # New Code # # We also run predictions on the test set at the very end _SCREAMING_SNAKE_CASE : str = [] for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. 
test_predictions.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc. accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: _SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) _SCREAMING_SNAKE_CASE : List[str] = torch.stack(__SCREAMING_SNAKE_CASE , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) _SCREAMING_SNAKE_CASE : int = metric.compute(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE ) accelerator.print("""Average test metrics from all folds:""" , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_()-> Optional[Any]: _SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) # New Code # parser.add_argument("""--num_folds""" , type=__SCREAMING_SNAKE_CASE , default=3 , help="""The number of splits to perform across the dataset""" ) _SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() _SCREAMING_SNAKE_CASE : Optional[int] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
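A minimal sketch (not part of the original script; names here are hypothetical) of the fold-ensembling step performed above: the per-fold test logits are stacked, averaged across folds, and the argmax of the averaged logits is scored against the shared references. Dividing the summed logits by the number of folds, as the script does, is equivalent to this mean.

import torch

# Hedged sketch: each element of fold_logits is assumed to have shape
# (num_test_examples, num_labels); references has shape (num_test_examples,).
def ensemble_fold_logits(fold_logits, references):
    stacked = torch.stack(fold_logits, dim=0)   # (num_folds, N, num_labels)
    mean_logits = stacked.mean(dim=0)           # (N, num_labels)
    predictions = mean_logits.argmax(dim=-1)    # (N,)
    return (predictions == references).float().mean().item()

# Toy usage: 3 folds, 4 test examples, 2 labels.
logits = [torch.randn(4, 2) for _ in range(3)]
refs = torch.tensor([0, 1, 0, 1])
print(ensemble_fold_logits(logits, refs))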
635
0
"""simple docstring""" from heapq import heappop, heappush import numpy as np def lowerCamelCase_( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> tuple[float | int, list[tuple[int, int]]]: _SCREAMING_SNAKE_CASE : int = grid.shape _SCREAMING_SNAKE_CASE : str = [-1, 1, 0, 0] _SCREAMING_SNAKE_CASE : Any = [0, 0, -1, 1] if allow_diagonal: dx += [-1, -1, 1, 1] dy += [-1, 1, -1, 1] _SCREAMING_SNAKE_CASE : Any = [(0, source)], set() _SCREAMING_SNAKE_CASE : str = np.full((rows, cols) , np.inf ) _SCREAMING_SNAKE_CASE : Dict = 0 _SCREAMING_SNAKE_CASE : List[Any] = np.empty((rows, cols) , dtype=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : str = None while queue: (_SCREAMING_SNAKE_CASE) : Any = heappop(__SCREAMING_SNAKE_CASE ) if (x, y) in visited: continue visited.add((x, y) ) if (x, y) == destination: _SCREAMING_SNAKE_CASE : Optional[Any] = [] while (x, y) != source: path.append((x, y) ) _SCREAMING_SNAKE_CASE : Optional[int] = predecessors[x, y] path.append(__SCREAMING_SNAKE_CASE ) # add the source manually path.reverse() return matrix[destination], path for i in range(len(__SCREAMING_SNAKE_CASE ) ): _SCREAMING_SNAKE_CASE : Tuple = x + dx[i], y + dy[i] if 0 <= nx < rows and 0 <= ny < cols: _SCREAMING_SNAKE_CASE : Optional[Any] = grid[nx][ny] if next_node == 1 and matrix[nx, ny] > dist + 1: heappush(__SCREAMING_SNAKE_CASE , (dist + 1, (nx, ny)) ) _SCREAMING_SNAKE_CASE : Any = dist + 1 _SCREAMING_SNAKE_CASE : Optional[int] = (x, y) return np.inf, [] if __name__ == "__main__": import doctest doctest.testmod()
716
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available lowerCAmelCase_ = { '''configuration_clipseg''': [ '''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CLIPSegConfig''', '''CLIPSegTextConfig''', '''CLIPSegVisionConfig''', ], '''processing_clipseg''': ['''CLIPSegProcessor'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CLIPSegModel''', '''CLIPSegPreTrainedModel''', '''CLIPSegTextModel''', '''CLIPSegVisionModel''', '''CLIPSegForImageSegmentation''', ] if TYPE_CHECKING: from .configuration_clipseg import ( CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP, CLIPSegConfig, CLIPSegTextConfig, CLIPSegVisionConfig, ) from .processing_clipseg import CLIPSegProcessor try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_clipseg import ( CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST, CLIPSegForImageSegmentation, CLIPSegModel, CLIPSegPreTrainedModel, CLIPSegTextModel, CLIPSegVisionModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
0
from typing import TYPE_CHECKING from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available lowerCAmelCase_ = { '''configuration_canine''': ['''CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CanineConfig'''], '''tokenization_canine''': ['''CanineTokenizer'''], } try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''CANINE_PRETRAINED_MODEL_ARCHIVE_LIST''', '''CanineForMultipleChoice''', '''CanineForQuestionAnswering''', '''CanineForSequenceClassification''', '''CanineForTokenClassification''', '''CanineLayer''', '''CanineModel''', '''CaninePreTrainedModel''', '''load_tf_weights_in_canine''', ] if TYPE_CHECKING: from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig from .tokenization_canine import CanineTokenizer try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_canine import ( CANINE_PRETRAINED_MODEL_ARCHIVE_LIST, CanineForMultipleChoice, CanineForQuestionAnswering, CanineForSequenceClassification, CanineForTokenClassification, CanineLayer, CanineModel, CaninePreTrainedModel, load_tf_weights_in_canine, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
717
"""simple docstring""" import numpy as np from cva import destroyAllWindows, imread, imshow, waitKey class _snake_case : """simple docstring""" def __init__( self : int , _A : List[Any] , _A : int , _A : int): """simple docstring""" if dst_width < 0 or dst_height < 0: raise ValueError("""Destination width/height should be > 0""") _SCREAMING_SNAKE_CASE : str = img _SCREAMING_SNAKE_CASE : Optional[Any] = img.shape[1] _SCREAMING_SNAKE_CASE : Tuple = img.shape[0] _SCREAMING_SNAKE_CASE : Any = dst_width _SCREAMING_SNAKE_CASE : Any = dst_height _SCREAMING_SNAKE_CASE : Any = self.src_w / self.dst_w _SCREAMING_SNAKE_CASE : Dict = self.src_h / self.dst_h _SCREAMING_SNAKE_CASE : Optional[Any] = ( np.ones((self.dst_h, self.dst_w, 3) , np.uinta) * 2_5_5 ) def _lowerCAmelCase ( self : Tuple): """simple docstring""" for i in range(self.dst_h): for j in range(self.dst_w): _SCREAMING_SNAKE_CASE : Any = self.img[self.get_y(_A)][self.get_x(_A)] def _lowerCAmelCase ( self : int , _A : int): """simple docstring""" return int(self.ratio_x * x) def _lowerCAmelCase ( self : str , _A : int): """simple docstring""" return int(self.ratio_y * y) if __name__ == "__main__": lowerCAmelCase_ , lowerCAmelCase_ = 800, 600 lowerCAmelCase_ = imread('''image_data/lena.jpg''', 1) lowerCAmelCase_ = NearestNeighbour(im, dst_w, dst_h) n.process() imshow( F"Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}", n.output ) waitKey(0) destroyAllWindows()
635
0
def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> list[list]: _SCREAMING_SNAKE_CASE : Any = current_set.copy() for row_index, row in enumerate(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = row[0] for column_index, column in enumerate(__SCREAMING_SNAKE_CASE ): if magnitude == 0: _SCREAMING_SNAKE_CASE : List[Any] = column continue _SCREAMING_SNAKE_CASE : List[str] = column / magnitude # Subtract to cancel term _SCREAMING_SNAKE_CASE : int = current_set[0] _SCREAMING_SNAKE_CASE : List[str] = [first_row] _SCREAMING_SNAKE_CASE : List[str] = current_set[1::] for row in current_set: _SCREAMING_SNAKE_CASE : Tuple = [] # If first term is 0, it is already in form we want, so we preserve it if row[0] == 0: final_set.append(__SCREAMING_SNAKE_CASE ) continue for column_index in range(len(__SCREAMING_SNAKE_CASE ) ): temp_row.append(first_row[column_index] - row[column_index] ) final_set.append(__SCREAMING_SNAKE_CASE ) # Create next recursion iteration set if len(final_set[0] ) != 3: _SCREAMING_SNAKE_CASE : List[Any] = final_set[0] _SCREAMING_SNAKE_CASE : Union[str, Any] = [] _SCREAMING_SNAKE_CASE : int = [] for row in final_set[1::]: current_first_column.append(row[0] ) next_iteration.append(row[1::] ) _SCREAMING_SNAKE_CASE : Optional[Any] = simplify(__SCREAMING_SNAKE_CASE ) for i in range(len(__SCREAMING_SNAKE_CASE ) ): resultant[i].insert(0 , current_first_column[i] ) resultant.insert(0 , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = resultant return final_set def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> list: if len(__SCREAMING_SNAKE_CASE ) == 0: raise IndexError("""solve_simultaneous() requires n lists of length n+1""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = len(__SCREAMING_SNAKE_CASE ) + 1 if any(len(__SCREAMING_SNAKE_CASE ) != _length for item in equations ): raise IndexError("""solve_simultaneous() requires n lists of length n+1""" ) for row in equations: if any(not isinstance(__SCREAMING_SNAKE_CASE , (int, float) ) for column in row ): raise ValueError("""solve_simultaneous() requires lists of integers""" ) if len(__SCREAMING_SNAKE_CASE ) == 1: return [equations[0][-1] / equations[0][0]] _SCREAMING_SNAKE_CASE : Optional[int] = equations.copy() if any(0 in row for row in data_set ): _SCREAMING_SNAKE_CASE : List[Any] = data_set.copy() _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for row_index, row in enumerate(__SCREAMING_SNAKE_CASE ): if 0 not in row: _SCREAMING_SNAKE_CASE : Optional[Any] = data_set.pop(__SCREAMING_SNAKE_CASE ) break if not full_row: raise ValueError("""solve_simultaneous() requires at least 1 full equation""" ) data_set.insert(0 , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[Any] = data_set.copy() _SCREAMING_SNAKE_CASE : Dict = simplify(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = simplified[::-1] _SCREAMING_SNAKE_CASE : list = [] for row in simplified: _SCREAMING_SNAKE_CASE : Optional[int] = row[-1] if not solutions: if row[-2] == 0: solutions.append(0 ) continue solutions.append(current_solution / row[-2] ) continue _SCREAMING_SNAKE_CASE : List[str] = row.copy()[: len(__SCREAMING_SNAKE_CASE ) - 1 :] while temp_row[0] == 0: temp_row.pop(0 ) if len(__SCREAMING_SNAKE_CASE ) == 0: solutions.append(0 ) continue _SCREAMING_SNAKE_CASE : int = temp_row[1::] _SCREAMING_SNAKE_CASE : str = temp_row[::-1] for column_index, column in enumerate(__SCREAMING_SNAKE_CASE ): current_solution -= column * solutions[column_index] solutions.append(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = [] for item in solutions: 
final.append(float(round(__SCREAMING_SNAKE_CASE , 5 ) ) ) return final[::-1] if __name__ == "__main__": import doctest doctest.testmod() lowerCAmelCase_ = [ [2, 1, 1, 1, 1, 4], [1, 2, 1, 1, 1, 5], [1, 1, 2, 1, 1, 6], [1, 1, 1, 2, 1, 7], [1, 1, 1, 1, 2, 8], ] print(solve_simultaneous(eq)) print(solve_simultaneous([[4, 2]]))
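As a cross-check on the demo system above, the same augmented rows can be split into a coefficient matrix and a right-hand side and handed to NumPy's direct solver; the solution of this particular system works out to [-1, 0, 1, 2, 3].

import numpy as np

eq = np.array(
    [
        [2, 1, 1, 1, 1, 4],
        [1, 2, 1, 1, 1, 5],
        [1, 1, 2, 1, 1, 6],
        [1, 1, 1, 2, 1, 7],
        [1, 1, 1, 1, 2, 8],
    ],
    dtype=float,
)
a, b = eq[:, :-1], eq[:, -1]  # coefficients and right-hand side
print(np.linalg.solve(a, b))  # -> [-1.  0.  1.  2.  3.]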
718
"""simple docstring""" import argparse from collections import defaultdict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : str = F"""{file}_{class_name}_{test_name}""" done_test[_id] += 1 with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = f.readlines() _SCREAMING_SNAKE_CASE : Optional[Any] = F"""class {class_name}(""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{4 * " "}def {test_name}(""" _SCREAMING_SNAKE_CASE : Tuple = F"""{8 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[Any] = F"""{16 * " "}{correct_line.split()[0]}""" _SCREAMING_SNAKE_CASE : List[str] = False _SCREAMING_SNAKE_CASE : Tuple = False _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[int] = False _SCREAMING_SNAKE_CASE : Any = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = 0 _SCREAMING_SNAKE_CASE : Dict = [] for line in lines: if line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = True elif in_class and line.startswith(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : str = True elif in_class and in_func and (line.startswith(__SCREAMING_SNAKE_CASE ) or line.startswith(__SCREAMING_SNAKE_CASE )): _SCREAMING_SNAKE_CASE : Dict = len(line.split(correct_line.split()[0] )[0] ) count += 1 if count == done_test[_id]: _SCREAMING_SNAKE_CASE : int = True if in_class and in_func and in_line: if ")" not in line: continue else: _SCREAMING_SNAKE_CASE : Any = True if in_class and in_func and in_line and insert_line: new_lines.append(F"""{spaces * " "}{correct_line}""" ) _SCREAMING_SNAKE_CASE : Optional[int] = False else: new_lines.append(__SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , """w""" ) as f: for line in new_lines: f.write(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None )-> Optional[Any]: if fail is not None: with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : Union[str, Any] = {l.strip() for l in f.readlines()} else: _SCREAMING_SNAKE_CASE : str = None with open(__SCREAMING_SNAKE_CASE , """r""" ) as f: _SCREAMING_SNAKE_CASE : str = f.readlines() _SCREAMING_SNAKE_CASE : str = defaultdict(__SCREAMING_SNAKE_CASE ) for line in correct_lines: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = line.split(""";""" ) if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures: overwrite_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--correct_filename''', help='''filename of tests with expected result''') parser.add_argument('''--fail_filename''', help='''filename of test failures''', type=str, default=None) lowerCAmelCase_ = parser.parse_args() main(args.correct_filename, args.fail_filename)
635
0
"""simple docstring""" import argparse import torch from torch import nn from transformers import MBartConfig, MBartForConditionalGeneration def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: _SCREAMING_SNAKE_CASE : str = [ """encoder.version""", """decoder.version""", """model.encoder.version""", """model.decoder.version""", """_float_tensor""", """decoder.output_projection.weight""", ] for k in ignore_keys: state_dict.pop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Optional[Any] = emb.weight.shape _SCREAMING_SNAKE_CASE : str = nn.Linear(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , bias=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = emb.weight.data return lin_layer def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="facebook/mbart-large-en-ro" , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=False )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Dict = torch.load(__SCREAMING_SNAKE_CASE , map_location="""cpu""" )["""model"""] remove_ignore_keys_(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = state_dict["""encoder.embed_tokens.weight"""].shape[0] _SCREAMING_SNAKE_CASE : str = MBartConfig.from_pretrained(__SCREAMING_SNAKE_CASE , vocab_size=__SCREAMING_SNAKE_CASE ) if mbart_aa and finetuned: _SCREAMING_SNAKE_CASE : List[str] = """relu""" _SCREAMING_SNAKE_CASE : Any = state_dict["""decoder.embed_tokens.weight"""] _SCREAMING_SNAKE_CASE : Dict = MBartForConditionalGeneration(__SCREAMING_SNAKE_CASE ) model.model.load_state_dict(__SCREAMING_SNAKE_CASE ) if finetuned: _SCREAMING_SNAKE_CASE : Optional[Any] = make_linear_from_emb(model.model.shared ) return model if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''fairseq_path''', type=str, help='''bart.large, bart.large.cnn or a path to a model.pt on local filesystem.''' ) parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument( '''--hf_config''', default='''facebook/mbart-large-cc25''', type=str, help='''Which huggingface architecture to use: mbart-large''', ) parser.add_argument('''--mbart_50''', action='''store_true''', help='''whether the model is mMART-50 checkpoint''') parser.add_argument('''--finetuned''', action='''store_true''', help='''whether the model is a fine-tuned checkpoint''') lowerCAmelCase_ = parser.parse_args() lowerCAmelCase_ = convert_fairseq_mbart_checkpoint_from_disk( args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa ) model.save_pretrained(args.pytorch_dump_folder_path)
719
"""simple docstring""" import argparse import re import torch from CLAP import create_model from transformers import AutoFeatureExtractor, ClapConfig, ClapModel lowerCAmelCase_ = { '''text_branch''': '''text_model''', '''audio_branch''': '''audio_model.audio_encoder''', '''attn''': '''attention.self''', '''self.proj''': '''output.dense''', '''attention.self_mask''': '''attn_mask''', '''mlp.fc1''': '''intermediate.dense''', '''mlp.fc2''': '''output.dense''', '''norm1''': '''layernorm_before''', '''norm2''': '''layernorm_after''', '''bn0''': '''batch_norm''', } lowerCAmelCase_ = AutoFeatureExtractor.from_pretrained('''laion/clap-htsat-unfused''', truncation='''rand_trunc''') def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> str: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = create_model( """HTSAT-tiny""" , """roberta""" , __SCREAMING_SNAKE_CASE , precision="""fp32""" , device="""cuda:0""" if torch.cuda.is_available() else """cpu""" , enable_fusion=__SCREAMING_SNAKE_CASE , fusion_type="""aff_2d""" if enable_fusion else None , ) return model, model_cfg def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = {} _SCREAMING_SNAKE_CASE : Optional[Any] = R""".*sequential.(\d+).*""" _SCREAMING_SNAKE_CASE : Any = R""".*_projection.(\d+).*""" for key, value in state_dict.items(): # check if any key needs to be modified for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items(): if key_to_modify in key: _SCREAMING_SNAKE_CASE : Optional[Any] = key.replace(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): # replace sequential layers with list _SCREAMING_SNAKE_CASE : List[Any] = re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) _SCREAMING_SNAKE_CASE : Dict = key.replace(F"""sequential.{sequential_layer}.""" , F"""layers.{int(__SCREAMING_SNAKE_CASE )//3}.linear.""" ) elif re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : List[str] = int(re.match(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).group(1 ) ) # Because in CLAP they use `nn.Sequential`... 
_SCREAMING_SNAKE_CASE : Dict = 1 if projecton_layer == 0 else 2 _SCREAMING_SNAKE_CASE : Union[str, Any] = key.replace(F"""_projection.{projecton_layer}.""" , F"""_projection.linear{transformers_projection_layer}.""" ) if "audio" in key and "qkv" in key: # split qkv into query, key and value _SCREAMING_SNAKE_CASE : Dict = value _SCREAMING_SNAKE_CASE : List[Any] = mixed_qkv.size(0 ) // 3 _SCREAMING_SNAKE_CASE : Optional[Any] = mixed_qkv[:qkv_dim] _SCREAMING_SNAKE_CASE : str = mixed_qkv[qkv_dim : qkv_dim * 2] _SCREAMING_SNAKE_CASE : Any = mixed_qkv[qkv_dim * 2 :] _SCREAMING_SNAKE_CASE : Dict = query_layer _SCREAMING_SNAKE_CASE : List[Any] = key_layer _SCREAMING_SNAKE_CASE : Dict = value_layer else: _SCREAMING_SNAKE_CASE : Optional[Any] = value return model_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> List[Any]: _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = init_clap(__SCREAMING_SNAKE_CASE , enable_fusion=__SCREAMING_SNAKE_CASE ) clap_model.eval() _SCREAMING_SNAKE_CASE : Dict = clap_model.state_dict() _SCREAMING_SNAKE_CASE : Tuple = rename_state_dict(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = ClapConfig() _SCREAMING_SNAKE_CASE : Tuple = enable_fusion _SCREAMING_SNAKE_CASE : Dict = ClapModel(__SCREAMING_SNAKE_CASE ) # ignore the spectrogram embedding layer model.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) transformers_config.save_pretrained(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''') parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''') parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''') parser.add_argument('''--enable_fusion''', action='''store_true''', help='''Whether to enable fusion or not''') lowerCAmelCase_ = parser.parse_args() convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
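The fused-QKV split in the renaming logic above slices one (3*dim, ...) tensor into equal thirds; a minimal standalone sketch of that step (toy sizes, hypothetical names):

import torch

fused_qkv = torch.randn(3 * 8, 8)     # toy fused attention weight, hidden size 8
q, k, v = fused_qkv.chunk(3, dim=0)   # equivalent to the three slices above
assert torch.equal(q, fused_qkv[:8])
assert torch.equal(k, fused_qkv[8:16])
assert torch.equal(v, fused_qkv[16:])
print(q.shape, k.shape, v.shape)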
635
0
"""simple docstring""" from typing import Optional, Tuple import jax import jax.numpy as jnp from flax import linen as nn from flax.core.frozen_dict import FrozenDict from transformers import CLIPConfig, FlaxPreTrainedModel from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1e-12 )-> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__SCREAMING_SNAKE_CASE , axis=1 ) , a_min=__SCREAMING_SNAKE_CASE ) ).T _SCREAMING_SNAKE_CASE : Any = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__SCREAMING_SNAKE_CASE , axis=1 ) , a_min=__SCREAMING_SNAKE_CASE ) ).T return jnp.matmul(__SCREAMING_SNAKE_CASE , norm_emb_a.T ) class _snake_case ( nn.Module ): """simple docstring""" a = 42 a = jnp.floataa def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = FlaxCLIPVisionModule(self.config.vision_config) _SCREAMING_SNAKE_CASE : Tuple = nn.Dense(self.config.projection_dim , use_bias=_A , dtype=self.dtype) _SCREAMING_SNAKE_CASE : Dict = self.param("""concept_embeds""" , jax.nn.initializers.ones , (1_7, self.config.projection_dim)) _SCREAMING_SNAKE_CASE : Any = self.param( """special_care_embeds""" , jax.nn.initializers.ones , (3, self.config.projection_dim)) _SCREAMING_SNAKE_CASE : Dict = self.param("""concept_embeds_weights""" , jax.nn.initializers.ones , (1_7,)) _SCREAMING_SNAKE_CASE : int = self.param("""special_care_embeds_weights""" , jax.nn.initializers.ones , (3,)) def __call__( self : List[Any] , _A : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = self.vision_model(_A)[1] _SCREAMING_SNAKE_CASE : Optional[int] = self.visual_projection(_A) _SCREAMING_SNAKE_CASE : str = jax_cosine_distance(_A , self.special_care_embeds) _SCREAMING_SNAKE_CASE : Any = jax_cosine_distance(_A , self.concept_embeds) # increase this value to create a stronger `nfsw` filter # at the cost of increasing the possibility of filtering benign image inputs _SCREAMING_SNAKE_CASE : Dict = 0.0 _SCREAMING_SNAKE_CASE : Optional[int] = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment _SCREAMING_SNAKE_CASE : Tuple = jnp.round(_A , 3) _SCREAMING_SNAKE_CASE : List[str] = jnp.any(special_scores > 0 , axis=1 , keepdims=_A) # Use a lower threshold if an image has any special care concept _SCREAMING_SNAKE_CASE : List[Any] = is_special_care * 0.01 _SCREAMING_SNAKE_CASE : Dict = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment _SCREAMING_SNAKE_CASE : int = jnp.round(_A , 3) _SCREAMING_SNAKE_CASE : Optional[int] = jnp.any(concept_scores > 0 , axis=1) return has_nsfw_concepts class _snake_case ( __snake_case ): """simple docstring""" a = CLIPConfig a = "clip_input" a = FlaxStableDiffusionSafetyCheckerModule def __init__( self : Optional[int] , _A : CLIPConfig , _A : Optional[Tuple] = None , _A : int = 0 , _A : jnp.dtype = jnp.floataa , _A : bool = True , **_A : Optional[int] , ): """simple docstring""" if input_shape is None: _SCREAMING_SNAKE_CASE : Optional[Any] = (1, 2_2_4, 2_2_4, 3) _SCREAMING_SNAKE_CASE : Any = self.module_class(config=_A , dtype=_A , **_A) super().__init__(_A , _A , input_shape=_A , seed=_A , dtype=_A , _do_init=_do_init) def _lowerCAmelCase ( self : Dict , _A : jax.random.KeyArray , _A : Tuple , _A : FrozenDict = None): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = jax.random.normal(_A , _A) _SCREAMING_SNAKE_CASE : List[Any] = jax.random.split(_A) 
_SCREAMING_SNAKE_CASE : Tuple = {"""params""": params_rng, """dropout""": dropout_rng} _SCREAMING_SNAKE_CASE : Optional[int] = self.module.init(_A , _A)["""params"""] return random_params def __call__( self : Tuple , _A : Union[str, Any] , _A : dict = None , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = jnp.transpose(_A , (0, 2, 3, 1)) return self.module.apply( {"""params""": params or self.params} , jnp.array(_A , dtype=jnp.floataa) , rngs={} , )
720
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9}, }, { "framework": "tensorflow", "script": "run_tf.py", "model_name_or_path": "distilbert-base-cased", "instance_type": "ml.g4dn.xlarge", "results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9}, }, ] ) class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , ) assert hasattr(self , """env""") def _lowerCAmelCase ( self : Union[str, Any] , _A : str=1): """simple docstring""" return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-single""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={**self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version="""py36""" , ) def _lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any]): """simple docstring""" TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""") def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.create_estimator() # run training estimator.fit() # result dataframe _SCREAMING_SNAKE_CASE : Any = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _SCREAMING_SNAKE_CASE : Any = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""]) _SCREAMING_SNAKE_CASE : Tuple = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _SCREAMING_SNAKE_CASE : int = ( Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy) assert all(t <= self.results["""eval_loss"""] for t in eval_loss) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
635
0
"""simple docstring""" import os import tempfile import unittest import numpy as np from diffusers.utils import is_flax_available from diffusers.utils.testing_utils import require_flax, slow if is_flax_available(): import jax import jax.numpy as jnp from flax.jax_utils import replicate from flax.training.common_utils import shard from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline @require_flax class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Any): """simple docstring""" with tempfile.TemporaryDirectory() as tmpdirname: # pipeline has Flax weights _SCREAMING_SNAKE_CASE : Any = FlaxDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=_A , cache_dir=_A) _SCREAMING_SNAKE_CASE : Tuple = [t[-1] for t in os.walk(os.path.join(_A , os.listdir(_A)[0] , """snapshots"""))] _SCREAMING_SNAKE_CASE : List[str] = [item for sublist in all_root_files for item in sublist] # None of the downloaded files should be a PyTorch file even if we have some here: # https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin assert not any(f.endswith(""".bin""") for f in files) @slow @require_flax class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Dict = FlaxStableDiffusionPipeline.from_pretrained( """hf-internal-testing/tiny-stable-diffusion-pipe""" , safety_checker=_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = ( """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of""" """ field, close up, split lighting, cinematic""" ) _SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0) _SCREAMING_SNAKE_CASE : Optional[int] = 4 _SCREAMING_SNAKE_CASE : List[Any] = jax.device_count() _SCREAMING_SNAKE_CASE : int = num_samples * [prompt] _SCREAMING_SNAKE_CASE : str = pipeline.prepare_inputs(_A) # shard inputs and rng _SCREAMING_SNAKE_CASE : int = replicate(_A) _SCREAMING_SNAKE_CASE : str = jax.random.split(_A , _A) _SCREAMING_SNAKE_CASE : List[str] = shard(_A) _SCREAMING_SNAKE_CASE : str = pipeline(_A , _A , _A , _A , jit=_A).images assert images.shape == (num_samples, 1, 6_4, 6_4, 3) if jax.device_count() == 8: assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 4.1_514_745) < 1e-3 assert np.abs(np.abs(_A , dtype=np.floataa).sum() - 4_9_9_4_7.8_7_5) < 5e-1 _SCREAMING_SNAKE_CASE : Union[str, Any] = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:]))) assert len(_A) == num_samples def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = FlaxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""flax""" , safety_checker=_A) _SCREAMING_SNAKE_CASE : Any = ( """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of""" """ field, close up, split lighting, cinematic""" ) _SCREAMING_SNAKE_CASE : Optional[int] = jax.random.PRNGKey(0) _SCREAMING_SNAKE_CASE : List[Any] = 5_0 _SCREAMING_SNAKE_CASE : str = jax.device_count() _SCREAMING_SNAKE_CASE : List[Any] = num_samples * [prompt] _SCREAMING_SNAKE_CASE : List[str] = pipeline.prepare_inputs(_A) # shard inputs and rng _SCREAMING_SNAKE_CASE : Dict = replicate(_A) _SCREAMING_SNAKE_CASE : Tuple = jax.random.split(_A , _A) _SCREAMING_SNAKE_CASE : List[Any] = shard(_A) _SCREAMING_SNAKE_CASE : int 
= pipeline(_A , _A , _A , _A , jit=_A).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.05_652_401)) < 1e-3 assert np.abs((np.abs(_A , dtype=np.floataa).sum() - 2_3_8_3_8_0_8.2)) < 5e-1 def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=_A) _SCREAMING_SNAKE_CASE : List[Any] = ( """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of""" """ field, close up, split lighting, cinematic""" ) _SCREAMING_SNAKE_CASE : Optional[int] = jax.random.PRNGKey(0) _SCREAMING_SNAKE_CASE : Dict = 5_0 _SCREAMING_SNAKE_CASE : Any = jax.device_count() _SCREAMING_SNAKE_CASE : List[Any] = num_samples * [prompt] _SCREAMING_SNAKE_CASE : Optional[Any] = pipeline.prepare_inputs(_A) # shard inputs and rng _SCREAMING_SNAKE_CASE : Optional[Any] = replicate(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.split(_A , _A) _SCREAMING_SNAKE_CASE : str = shard(_A) _SCREAMING_SNAKE_CASE : int = pipeline(_A , _A , _A , _A , jit=_A).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_003_906)) < 1e-3 assert np.abs((np.abs(_A , dtype=np.floataa).sum() - 2_3_7_3_5_1_6.7_5)) < 5e-1 def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = FlaxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa) _SCREAMING_SNAKE_CASE : List[str] = ( """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of""" """ field, close up, split lighting, cinematic""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.PRNGKey(0) _SCREAMING_SNAKE_CASE : Any = 5_0 _SCREAMING_SNAKE_CASE : Dict = jax.device_count() _SCREAMING_SNAKE_CASE : int = num_samples * [prompt] _SCREAMING_SNAKE_CASE : Tuple = pipeline.prepare_inputs(_A) # shard inputs and rng _SCREAMING_SNAKE_CASE : List[str] = replicate(_A) _SCREAMING_SNAKE_CASE : Dict = jax.random.split(_A , _A) _SCREAMING_SNAKE_CASE : List[str] = shard(_A) _SCREAMING_SNAKE_CASE : Any = pipeline(_A , _A , _A , _A , jit=_A).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.04_003_906)) < 1e-3 assert np.abs((np.abs(_A , dtype=np.floataa).sum() - 2_3_7_3_5_1_6.7_5)) < 5e-1 def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = FlaxDDIMScheduler( beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , set_alpha_to_one=_A , steps_offset=1 , ) _SCREAMING_SNAKE_CASE : int = FlaxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , scheduler=_A , safety_checker=_A , ) _SCREAMING_SNAKE_CASE : List[Any] = scheduler.create_state() _SCREAMING_SNAKE_CASE : Optional[Any] = scheduler_state _SCREAMING_SNAKE_CASE : Union[str, Any] = ( """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of""" """ field, close up, split lighting, cinematic""" ) _SCREAMING_SNAKE_CASE : int = jax.random.PRNGKey(0) 
_SCREAMING_SNAKE_CASE : Optional[int] = 5_0 _SCREAMING_SNAKE_CASE : List[Any] = jax.device_count() _SCREAMING_SNAKE_CASE : Any = num_samples * [prompt] _SCREAMING_SNAKE_CASE : Any = pipeline.prepare_inputs(_A) # shard inputs and rng _SCREAMING_SNAKE_CASE : Union[str, Any] = replicate(_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = jax.random.split(_A , _A) _SCREAMING_SNAKE_CASE : Any = shard(_A) _SCREAMING_SNAKE_CASE : Dict = pipeline(_A , _A , _A , _A , jit=_A).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) if jax.device_count() == 8: assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa).sum() - 0.045_043_945)) < 1e-3 assert np.abs((np.abs(_A , dtype=np.floataa).sum() - 2_3_4_7_6_9_3.5)) < 5e-1 def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : List[str] = ( """A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of""" """ field, close up, split lighting, cinematic""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = jax.device_count() _SCREAMING_SNAKE_CASE : Optional[Any] = num_samples * [prompt] _SCREAMING_SNAKE_CASE : List[Any] = jax.random.split(jax.random.PRNGKey(0) , _A) _SCREAMING_SNAKE_CASE : List[str] = FlaxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=_A , ) _SCREAMING_SNAKE_CASE : Optional[int] = replicate(_A) _SCREAMING_SNAKE_CASE : Optional[int] = pipeline.prepare_inputs(_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = shard(_A) _SCREAMING_SNAKE_CASE : str = pipeline(_A , _A , _A , jit=_A).images assert images.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) _SCREAMING_SNAKE_CASE : int = images[2, 0, 2_5_6, 1_0:1_7, 1] # With memory efficient attention _SCREAMING_SNAKE_CASE : Tuple = FlaxStableDiffusionPipeline.from_pretrained( """CompVis/stable-diffusion-v1-4""" , revision="""bf16""" , dtype=jnp.bfloataa , safety_checker=_A , use_memory_efficient_attention=_A , ) _SCREAMING_SNAKE_CASE : Tuple = replicate(_A) _SCREAMING_SNAKE_CASE : str = pipeline.prepare_inputs(_A) _SCREAMING_SNAKE_CASE : int = shard(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = pipeline(_A , _A , _A , jit=_A).images assert images_eff.shape == (num_samples, 1, 5_1_2, 5_1_2, 3) _SCREAMING_SNAKE_CASE : Union[str, Any] = images[2, 0, 2_5_6, 1_0:1_7, 1] # I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum` # over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now. assert abs(slice_eff - slice).max() < 1e-2
721
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: return max(metric_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for gt in ground_truths ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Dict = [] if args.gold_data_mode == "qa": _SCREAMING_SNAKE_CASE : int = pd.read_csv(__SCREAMING_SNAKE_CASE , sep="""\t""" , header=__SCREAMING_SNAKE_CASE ) for answer_list in data[1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = ast.literal_eval(__SCREAMING_SNAKE_CASE ) answers.append(__SCREAMING_SNAKE_CASE ) else: _SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[int] = [[reference] for reference in references] _SCREAMING_SNAKE_CASE : Optional[int] = 0 for prediction, ground_truths in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): total += 1 em += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) fa += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = 1_00.0 * em / total _SCREAMING_SNAKE_CASE : Optional[Any] = 1_00.0 * fa / total logger.info(F"""F1: {fa:.2f}""" ) logger.info(F"""EM: {em:.2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = args.k _SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[Any] = 0 for hypo, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = set(hypo.split("""\t""" )[:k] ) _SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k _SCREAMING_SNAKE_CASE : int = 1_00.0 * em / total logger.info(F"""Precision@{k}: {em: .2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: def strip_title(__SCREAMING_SNAKE_CASE ): if title.startswith("""\"""" ): _SCREAMING_SNAKE_CASE : Optional[int] = title[1:] if title.endswith("""\"""" ): _SCREAMING_SNAKE_CASE : str = title[:-1] return title _SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , 
return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device ) _SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = question_enc_outputs[0] _SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever( __SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) _SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for docs in all_docs: _SCREAMING_SNAKE_CASE : str = [strip_title(__SCREAMING_SNAKE_CASE ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(__SCREAMING_SNAKE_CASE ) ) return provenance_strings def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.attention_mask.to(args.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) _SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) if args.print_predictions: for q, a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.info("""Q: {} - A: {}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) return answers def lowerCamelCase_()-> List[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__SCREAMING_SNAKE_CASE , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=__SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=__SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=__SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__SCREAMING_SNAKE_CASE , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""k for the 
precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=__SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=__SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=__SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=__SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) _SCREAMING_SNAKE_CASE : Dict = parser.parse_args() _SCREAMING_SNAKE_CASE : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : Union[str, Any] = {} if args.model_type is None: _SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration _SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs if args.index_name is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name if args.index_path is not None: _SCREAMING_SNAKE_CASE : Any = args.index_path else: _SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration _SCREAMING_SNAKE_CASE : int = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k _SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): 
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.retriever.init_retrieval() else: _SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: _SCREAMING_SNAKE_CASE : str = [] for line in tqdm(__SCREAMING_SNAKE_CASE ): questions.append(line.strip() ) if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size: _SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" ) preds_file.flush() _SCREAMING_SNAKE_CASE : Any = [] if len(__SCREAMING_SNAKE_CASE ) > 0: _SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) ) preds_file.flush() score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": lowerCAmelCase_ = get_args() main(args)
635
0
"""simple docstring""" import argparse import ast import logging import os import sys import pandas as pd import torch from tqdm import tqdm from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration from transformers import logging as transformers_logging sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip from utils_rag import exact_match_score, fa_score # noqa: E402 # isort:skip lowerCAmelCase_ = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) transformers_logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[str]: if "token" in model_name_or_path: return "rag_token" if "sequence" in model_name_or_path: return "rag_sequence" if "bart" in model_name_or_path: return "bart" return None def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: return max(metric_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for gt in ground_truths ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: _SCREAMING_SNAKE_CASE : List[str] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Dict = [] if args.gold_data_mode == "qa": _SCREAMING_SNAKE_CASE : int = pd.read_csv(__SCREAMING_SNAKE_CASE , sep="""\t""" , header=__SCREAMING_SNAKE_CASE ) for answer_list in data[1]: _SCREAMING_SNAKE_CASE : Union[str, Any] = ast.literal_eval(__SCREAMING_SNAKE_CASE ) answers.append(__SCREAMING_SNAKE_CASE ) else: _SCREAMING_SNAKE_CASE : Optional[Any] = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[int] = [[reference] for reference in references] _SCREAMING_SNAKE_CASE : Optional[int] = 0 for prediction, ground_truths in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): total += 1 em += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) fa += metric_max_over_ground_truths(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = 100.0 * em / total _SCREAMING_SNAKE_CASE : Optional[Any] = 100.0 * fa / total logger.info(F"""F1: {fa:.2f}""" ) logger.info(F"""EM: {em:.2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = args.k _SCREAMING_SNAKE_CASE : int = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Any = [line.strip() for line in open(__SCREAMING_SNAKE_CASE , """r""" ).readlines()] _SCREAMING_SNAKE_CASE : Optional[Any] = 0 for hypo, reference in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[Any] = set(hypo.split("""\t""" )[:k] ) _SCREAMING_SNAKE_CASE : Union[str, Any] = set(reference.split("""\t""" ) ) total += 1 em += len(hypo_provenance & ref_provenance ) / k _SCREAMING_SNAKE_CASE : int = 100.0 * em / total logger.info(F"""Precision@{k}: {em: .2f}""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: def strip_title(__SCREAMING_SNAKE_CASE ): if title.startswith("""\"""" ): _SCREAMING_SNAKE_CASE : Optional[int] = title[1:] if title.endswith("""\"""" ): _SCREAMING_SNAKE_CASE : str = title[:-1] return title _SCREAMING_SNAKE_CASE : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , 
return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , )["""input_ids"""].to(args.device ) _SCREAMING_SNAKE_CASE : List[str] = rag_model.rag.question_encoder(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = question_enc_outputs[0] _SCREAMING_SNAKE_CASE : List[Any] = rag_model.retriever( __SCREAMING_SNAKE_CASE , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="""pt""" , ) _SCREAMING_SNAKE_CASE : Optional[int] = rag_model.retriever.index.get_doc_dicts(result.doc_ids ) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for docs in all_docs: _SCREAMING_SNAKE_CASE : str = [strip_title(__SCREAMING_SNAKE_CASE ) for title in docs["""title"""]] provenance_strings.append("""\t""".join(__SCREAMING_SNAKE_CASE ) ) return provenance_strings def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus( __SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding=__SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.input_ids.to(args.device ) _SCREAMING_SNAKE_CASE : Union[str, Any] = inputs_dict.attention_mask.to(args.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = rag_model.generate( # rag_model overwrites generate __SCREAMING_SNAKE_CASE , attention_mask=__SCREAMING_SNAKE_CASE , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=__SCREAMING_SNAKE_CASE , num_return_sequences=1 , bad_words_ids=[[0, 0]] , ) _SCREAMING_SNAKE_CASE : Tuple = rag_model.retriever.generator_tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) if args.print_predictions: for q, a in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): logger.info("""Q: {} - A: {}""".format(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) return answers def lowerCamelCase_()-> List[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = argparse.ArgumentParser() parser.add_argument( """--model_type""" , choices=["""rag_sequence""", """rag_token""", """bart"""] , type=__SCREAMING_SNAKE_CASE , help=( """RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the""" """ model_name_or_path""" ) , ) parser.add_argument( """--index_name""" , default=__SCREAMING_SNAKE_CASE , choices=["""exact""", """compressed""", """legacy"""] , type=__SCREAMING_SNAKE_CASE , help="""RAG model retriever type""" , ) parser.add_argument( """--index_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Path to the retrieval index""" , ) parser.add_argument("""--n_docs""" , default=5 , type=__SCREAMING_SNAKE_CASE , help="""Number of retrieved docs""" ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained checkpoints or model identifier from huggingface.co/models""" , ) parser.add_argument( """--eval_mode""" , choices=["""e2e""", """retrieval"""] , default="""e2e""" , type=__SCREAMING_SNAKE_CASE , help=( """Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates""" """ precision@k.""" ) , ) parser.add_argument("""--k""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""k for the 
precision@k calculation""" ) parser.add_argument( """--evaluation_set""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a file containing evaluation samples""" , ) parser.add_argument( """--gold_data_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to a tab-separated file with gold samples""" , ) parser.add_argument( """--gold_data_mode""" , default="""qa""" , type=__SCREAMING_SNAKE_CASE , choices=["""qa""", """ans"""] , help=( """Format of the gold data file""" """qa - a single line in the following format: question [tab] answer_list""" """ans - a single line of the gold file contains the expected answer string""" ) , ) parser.add_argument( """--predictions_path""" , type=__SCREAMING_SNAKE_CASE , default="""predictions.txt""" , help="""Name of the predictions file, to be stored in the checkpoints directory""" , ) parser.add_argument( """--eval_all_checkpoints""" , action="""store_true""" , help="""Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number""" , ) parser.add_argument( """--eval_batch_size""" , default=8 , type=__SCREAMING_SNAKE_CASE , help="""Batch size per GPU/CPU for evaluation.""" , ) parser.add_argument( """--recalculate""" , help="""Recalculate predictions even if the prediction file exists""" , action="""store_true""" , ) parser.add_argument( """--num_beams""" , default=4 , type=__SCREAMING_SNAKE_CASE , help="""Number of beams to be used when generating answers""" , ) parser.add_argument("""--min_length""" , default=1 , type=__SCREAMING_SNAKE_CASE , help="""Min length of the generated answers""" ) parser.add_argument("""--max_length""" , default=50 , type=__SCREAMING_SNAKE_CASE , help="""Max length of the generated answers""" ) parser.add_argument( """--print_predictions""" , action="""store_true""" , help="""If True, prints predictions while evaluating.""" , ) parser.add_argument( """--print_docs""" , action="""store_true""" , help="""If True, prints docs retried while generating.""" , ) _SCREAMING_SNAKE_CASE : Dict = parser.parse_args() _SCREAMING_SNAKE_CASE : Any = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""" ) return args def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: _SCREAMING_SNAKE_CASE : Union[str, Any] = {} if args.model_type is None: _SCREAMING_SNAKE_CASE : Optional[int] = infer_model_type(args.model_name_or_path ) assert args.model_type is not None if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : List[Any] = RagTokenForGeneration if args.model_type == """rag_token""" else RagSequenceForGeneration _SCREAMING_SNAKE_CASE : Optional[Any] = args.n_docs if args.index_name is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = args.index_name if args.index_path is not None: _SCREAMING_SNAKE_CASE : Any = args.index_path else: _SCREAMING_SNAKE_CASE : Any = BartForConditionalGeneration _SCREAMING_SNAKE_CASE : int = ( [f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()] if args.eval_all_checkpoints else [args.model_name_or_path] ) logger.info("""Evaluate the following checkpoints: %s""" , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = get_scores if args.eval_mode == """e2e""" else get_precision_at_k _SCREAMING_SNAKE_CASE : Tuple = evaluate_batch_eae if args.eval_mode == """e2e""" else evaluate_batch_retrieval for checkpoint in checkpoints: if os.path.exists(args.predictions_path ) and (not args.recalculate): 
logger.info("""Calculating metrics based on an existing predictions file: {}""".format(args.predictions_path ) ) score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) continue logger.info("""***** Running evaluation for {} *****""".format(__SCREAMING_SNAKE_CASE ) ) logger.info(""" Batch size = %d""" , args.eval_batch_size ) logger.info(""" Predictions will be stored under {}""".format(args.predictions_path ) ) if args.model_type.startswith("""rag""" ): _SCREAMING_SNAKE_CASE : str = RagRetriever.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , retriever=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.retriever.init_retrieval() else: _SCREAMING_SNAKE_CASE : str = model_class.from_pretrained(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) model.to(args.device ) with open(args.evaluation_set , """r""" ) as eval_file, open(args.predictions_path , """w""" ) as preds_file: _SCREAMING_SNAKE_CASE : str = [] for line in tqdm(__SCREAMING_SNAKE_CASE ): questions.append(line.strip() ) if len(__SCREAMING_SNAKE_CASE ) == args.eval_batch_size: _SCREAMING_SNAKE_CASE : str = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) + """\n""" ) preds_file.flush() _SCREAMING_SNAKE_CASE : Any = [] if len(__SCREAMING_SNAKE_CASE ) > 0: _SCREAMING_SNAKE_CASE : List[str] = evaluate_batch_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) preds_file.write("""\n""".join(__SCREAMING_SNAKE_CASE ) ) preds_file.flush() score_fn(__SCREAMING_SNAKE_CASE , args.predictions_path , args.gold_data_path ) if __name__ == "__main__": lowerCAmelCase_ = get_args() main(args)
700
"""simple docstring""" import argparse import random import joblib import numpy as np import torch from igf.igf import ( SecondaryLearner, collect_objective_set, compute_perplexity, generate_datasets, load_gpta, recopy_gpta, set_seed, train_secondary_learner, ) from torch.utils.data import DataLoader, RandomSampler from transformers import GPTaLMHeadModel def lowerCamelCase_(__SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1_026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , )-> Union[str, Any]: set_seed(3 ) # generate train_data and objective_set _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = generate_datasets( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # keeps model same across runs set_seed(4 ) # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights # can we train on GPU? _SCREAMING_SNAKE_CASE : Dict = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) # load pretrained model _SCREAMING_SNAKE_CASE : Any = load_gpta("""gpt2""" ).to(__SCREAMING_SNAKE_CASE ) print("""computing perplexity on objective set""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item() print("""perplexity on objective set:""" , __SCREAMING_SNAKE_CASE ) # collect igf pairs and save to file demo.jbl collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # clean up, delete model and data we don't need anymore del model, train_data, objective_set torch.cuda.empty_cache() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , )-> Optional[int]: set_seed(42 ) # Load pre-trained model _SCREAMING_SNAKE_CASE : Any = GPTaLMHeadModel.from_pretrained("""gpt2""" ) # Initialize secondary learner to use embedding weights of model _SCREAMING_SNAKE_CASE : Union[str, Any] = SecondaryLearner(__SCREAMING_SNAKE_CASE ) # Train secondary learner _SCREAMING_SNAKE_CASE : Any = train_secondary_learner( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , ) del model, secondary_learner_train_data torch.cuda.empty_cache() return secondary_learner def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1_000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Tuple = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = RandomSampler(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1 _SCREAMING_SNAKE_CASE : List[Any] = 0 
_SCREAMING_SNAKE_CASE : Any = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) model.train() if secondary_learner is not None: secondary_learner.to(__SCREAMING_SNAKE_CASE ) secondary_learner.eval() _SCREAMING_SNAKE_CASE : Dict = [] _SCREAMING_SNAKE_CASE : Optional[int] = 0 _SCREAMING_SNAKE_CASE : Optional[Any] = [] _SCREAMING_SNAKE_CASE : int = [] # Compute the performance of the transformer model at the beginning _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) for epoch in range(int(__SCREAMING_SNAKE_CASE ) ): for step, example in enumerate(__SCREAMING_SNAKE_CASE ): torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = random.randint(0 , example.size(2 ) - context_len - 1 ) _SCREAMING_SNAKE_CASE : int = example[0, 0, start : start + context_len] lm_optimizer.zero_grad() _SCREAMING_SNAKE_CASE : Union[str, Any] = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = True if secondary_learner is not None: _SCREAMING_SNAKE_CASE : List[Any] = secondary_learner.forward( torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item() observed_qs.append(float(__SCREAMING_SNAKE_CASE ) ) # Here we implement the simple non-constant threshold for the predicted IG(X) value # We will decay the selectivity of our secondary learner filter from # 1 standard deviation above average to 1 below average after 10 batches. if global_step == 10: _SCREAMING_SNAKE_CASE : Dict = -1 if predicted_q < threshold: _SCREAMING_SNAKE_CASE : List[str] = False # If we passed the filter, add the context to the batch! if do_backprop: contexts.append(np.array(context.cpu() ) ) _SCREAMING_SNAKE_CASE : Union[str, Any] = outputs[0] lm_loss.backward() examples += 1 del outputs # Once the batch is filled with enough contexts, backprop on the batch. 
if examples == batch_size: torch.cuda.empty_cache() _SCREAMING_SNAKE_CASE : Any = 0 # Do LM backprop torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 ) lm_optimizer.step() lm_scheduler.step() # Update learning rate schedule global_step += 1 # Compute the performance of the transformer model at this batch if global_step % eval_interval == 0: _SCREAMING_SNAKE_CASE : Tuple = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) test_perps.append(__SCREAMING_SNAKE_CASE ) print("""Test perplexity, step""" , __SCREAMING_SNAKE_CASE , """:""" , __SCREAMING_SNAKE_CASE ) # Break out of the loop after 60 batches if max_steps > 0 and global_step > 60: break if max_steps > 0 and global_step > 60: break # save finetuned transformer model torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE ) torch.cuda.empty_cache() # Do some cleaning up so we can reinitialize for the next run of this function del lm_optimizer del lm_scheduler return model def lowerCamelCase_()-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" ) # Required parameters parser.add_argument( """--data_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The input data dir. Should contain data files for WikiText.""" , ) parser.add_argument( """--model_name_or_path""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""Path to pretrained model or model identifier from huggingface.co/models""" , ) parser.add_argument( """--data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=( """A jbl file containing tokenized data which can be split as objective dataset, """ """train_dataset and test_dataset.""" ) , ) parser.add_argument( """--igf_data_file""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , ) parser.add_argument( """--output_dir""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="""The output directory where the final fine-tuned model is stored.""" , ) parser.add_argument( """--tokenizer_name""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Pretrained tokenizer name or path if not the same as model_name""" , ) parser.add_argument("""--seed""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="""A seed for reproducible training.""" ) parser.add_argument( """--context_len""" , default=32 , type=__SCREAMING_SNAKE_CASE , help=( """The maximum total input sequence length after tokenization. 
Sequences longer """ """than this will be truncated, sequences shorter will be padded.""" ) , ) parser.add_argument( """--size_objective_set""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""number of articles that are long enough to be used as our objective set""" , ) parser.add_argument( """--eval_freq""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""secondary model evaluation is triggered at eval_freq""" ) parser.add_argument("""--max_steps""" , default=1_000 , type=__SCREAMING_SNAKE_CASE , help="""To calculate training epochs""" ) parser.add_argument( """--secondary_learner_batch_size""" , default=128 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data for secondary learner""" , ) parser.add_argument( """--batch_size""" , default=16 , type=__SCREAMING_SNAKE_CASE , help="""batch size of training data of language model(gpt2) """ ) parser.add_argument( """--eval_interval""" , default=10 , type=__SCREAMING_SNAKE_CASE , help=( """decay the selectivity of our secondary learner filter from""" """1 standard deviation above average to 1 below average after 10 batches""" ) , ) parser.add_argument( """--number""" , default=100 , type=__SCREAMING_SNAKE_CASE , help="""The number of examples split to be used as objective_set/test_data""" ) parser.add_argument( """--min_len""" , default=1_026 , type=__SCREAMING_SNAKE_CASE , help="""The minimum length of the article to be used as objective set""" ) parser.add_argument( """--secondary_learner_max_epochs""" , default=15 , type=__SCREAMING_SNAKE_CASE , help="""number of epochs to train secondary learner""" ) parser.add_argument("""--trim""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""truncate the example if it exceeds context length""" ) parser.add_argument( """--threshold""" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=( """The threshold value used by secondary learner to filter the train_data and allow only""" """ informative data as input to the model""" ) , ) parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=__SCREAMING_SNAKE_CASE , help="""finetuned_model_name""" ) parser.add_argument( """--recopy_model""" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , ) # function calls # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner generate_n_pairs( context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , ) # Load train data for secondary learner _SCREAMING_SNAKE_CASE : Optional[int] = joblib.load("""data/IGF_values.jbl""" ) # Train secondary learner _SCREAMING_SNAKE_CASE : int = training_secondary_learner( __SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="""igf_model.pt""" , ) # load pretrained gpt2 model _SCREAMING_SNAKE_CASE : List[Any] = GPTaLMHeadModel.from_pretrained("""gpt2""" ) set_seed(42 ) # Generate train and test data to train and evaluate gpt2 model _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[Any] = generate_datasets( context_len=32 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=100 , min_len=1_026 , trim=__SCREAMING_SNAKE_CASE ) # fine-tuning of the gpt2 model using igf (Information Gain Filtration) finetune( 
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1_000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="""gpt2_finetuned.pt""" , ) if __name__ == "__main__": main()
635
0
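The IGF fine-tuning loop above backpropagates on a context only when the secondary learner's predicted information gain clears a selectivity threshold, and that threshold is relaxed after the first ten batches. A minimal sketch of the filtering rule under the same assumptions (the names below are illustrative, not from the script):

def keep_context(predicted_q: float, global_step: int, initial: float = 1.0, relaxed: float = -1.0) -> bool:
    """Demand high predicted information gain early; relax the bar after 10 batches."""
    threshold = initial if global_step < 10 else relaxed
    return predicted_q >= threshold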
"""simple docstring""" import dataclasses import json import sys import types from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError from copy import copy from enum import Enum from inspect import isclass from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints import yaml lowerCAmelCase_ = NewType('''DataClass''', Any) lowerCAmelCase_ = NewType('''DataClassType''', Any) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Any: if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise ArgumentTypeError( F"""Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive).""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Callable[[str], Any]: _SCREAMING_SNAKE_CASE : Union[str, Any] = {str(__SCREAMING_SNAKE_CASE ): choice for choice in choices} return lambda __SCREAMING_SNAKE_CASE : str_to_choice.get(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_(*, __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = dataclasses.MISSING , __SCREAMING_SNAKE_CASE = dataclasses.MISSING , __SCREAMING_SNAKE_CASE = None , **__SCREAMING_SNAKE_CASE , )-> dataclasses.Field: if metadata is None: # Important, don't use as default param in function signature because dict is mutable and shared across function calls _SCREAMING_SNAKE_CASE : Optional[int] = {} if aliases is not None: _SCREAMING_SNAKE_CASE : Dict = aliases if help is not None: _SCREAMING_SNAKE_CASE : Optional[int] = help return dataclasses.field(metadata=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , default_factory=__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) class _snake_case ( __snake_case ): """simple docstring""" a = 42 def __init__( self : Union[str, Any] , _A : Union[DataClassType, Iterable[DataClassType]] , **_A : str): """simple docstring""" if "formatter_class" not in kwargs: _SCREAMING_SNAKE_CASE : Optional[int] = ArgumentDefaultsHelpFormatter super().__init__(**_A) if dataclasses.is_dataclass(_A): _SCREAMING_SNAKE_CASE : List[str] = [dataclass_types] _SCREAMING_SNAKE_CASE : Optional[Any] = list(_A) for dtype in self.dataclass_types: self._add_dataclass_arguments(_A) @staticmethod def _lowerCAmelCase ( _A : ArgumentParser , _A : dataclasses.Field): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = f"""--{field.name}""" _SCREAMING_SNAKE_CASE : int = field.metadata.copy() # field.metadata is not used at all by Data Classes, # it is provided as a third-party extension mechanism. 
if isinstance(field.type , _A): raise RuntimeError( """Unresolved type detected, which should have been done with the help of """ """`typing.get_type_hints` method by default""") _SCREAMING_SNAKE_CASE : Optional[int] = kwargs.pop("""aliases""" , []) if isinstance(_A , _A): _SCREAMING_SNAKE_CASE : int = [aliases] _SCREAMING_SNAKE_CASE : Tuple = getattr(field.type , """__origin__""" , field.type) if origin_type is Union or (hasattr(_A , """UnionType""") and isinstance(_A , types.UnionType)): if str not in field.type.__args__ and ( len(field.type.__args__) != 2 or type(_A) not in field.type.__args__ ): raise ValueError( """Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because""" """ the argument parser only supports one type per argument.""" f""" Problem encountered in field '{field.name}'.""") if type(_A) not in field.type.__args__: # filter `str` in Union _SCREAMING_SNAKE_CASE : Union[str, Any] = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1] _SCREAMING_SNAKE_CASE : Optional[Any] = getattr(field.type , """__origin__""" , field.type) elif bool not in field.type.__args__: # filter `NoneType` in Union (except for `Union[bool, NoneType]`) _SCREAMING_SNAKE_CASE : int = ( field.type.__args__[0] if isinstance(_A , field.type.__args__[1]) else field.type.__args__[1] ) _SCREAMING_SNAKE_CASE : Optional[Any] = getattr(field.type , """__origin__""" , field.type) # A variable to store kwargs for a boolean field, if needed # so that we can init a `no_*` complement argument (see below) _SCREAMING_SNAKE_CASE : List[Any] = {} if origin_type is Literal or (isinstance(field.type , _A) and issubclass(field.type , _A)): if origin_type is Literal: _SCREAMING_SNAKE_CASE : Optional[Any] = field.type.__args__ else: _SCREAMING_SNAKE_CASE : int = [x.value for x in field.type] _SCREAMING_SNAKE_CASE : List[str] = make_choice_type_function(kwargs["""choices"""]) if field.default is not dataclasses.MISSING: _SCREAMING_SNAKE_CASE : Any = field.default else: _SCREAMING_SNAKE_CASE : Union[str, Any] = True elif field.type is bool or field.type == Optional[bool]: # Copy the currect kwargs to use to instantiate a `no_*` complement argument below. # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument _SCREAMING_SNAKE_CASE : int = copy(_A) # Hack because type=bool in argparse does not behave as we want. _SCREAMING_SNAKE_CASE : Optional[Any] = string_to_bool if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING): # Default value is False if we have no default when of type bool. 
_SCREAMING_SNAKE_CASE : Dict = False if field.default is dataclasses.MISSING else field.default # This is the value that will get picked if we don't include --field_name in any way _SCREAMING_SNAKE_CASE : str = default # This tells argparse we accept 0 or 1 value after --field_name _SCREAMING_SNAKE_CASE : str = """?""" # This is the value that will get picked if we do --field_name (without value) _SCREAMING_SNAKE_CASE : List[Any] = True elif isclass(_A) and issubclass(_A , _A): _SCREAMING_SNAKE_CASE : Tuple = field.type.__args__[0] _SCREAMING_SNAKE_CASE : Optional[int] = """+""" if field.default_factory is not dataclasses.MISSING: _SCREAMING_SNAKE_CASE : Union[str, Any] = field.default_factory() elif field.default is dataclasses.MISSING: _SCREAMING_SNAKE_CASE : Optional[Any] = True else: _SCREAMING_SNAKE_CASE : List[str] = field.type if field.default is not dataclasses.MISSING: _SCREAMING_SNAKE_CASE : int = field.default elif field.default_factory is not dataclasses.MISSING: _SCREAMING_SNAKE_CASE : Union[str, Any] = field.default_factory() else: _SCREAMING_SNAKE_CASE : Any = True parser.add_argument(_A , *_A , **_A) # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added. # Order is important for arguments with the same destination! # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down # here and we do not need those changes/additional keys. if field.default is True and (field.type is bool or field.type == Optional[bool]): _SCREAMING_SNAKE_CASE : Union[str, Any] = False parser.add_argument(f"""--no_{field.name}""" , action="""store_false""" , dest=field.name , **_A) def _lowerCAmelCase ( self : Optional[Any] , _A : DataClassType): """simple docstring""" if hasattr(_A , """_argument_group_name"""): _SCREAMING_SNAKE_CASE : List[str] = self.add_argument_group(dtype._argument_group_name) else: _SCREAMING_SNAKE_CASE : str = self try: _SCREAMING_SNAKE_CASE : Dict[str, type] = get_type_hints(_A) except NameError: raise RuntimeError( f"""Type resolution failed for {dtype}. Try declaring the class in global scope or """ """removing line of `from __future__ import annotations` which opts in Postponed """ """Evaluation of Annotations (PEP 563)""") except TypeError as ex: # Remove this block when we drop Python 3.9 support if sys.version_info[:2] < (3, 1_0) and "unsupported operand type(s) for |" in str(_A): _SCREAMING_SNAKE_CASE : List[str] = """.""".join(map(_A , sys.version_info[:3])) raise RuntimeError( f"""Type resolution failed for {dtype} on Python {python_version}. Try removing """ """line of `from __future__ import annotations` which opts in union types as """ """`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). 
To """ """support Python versions that lower than 3.10, you need to use """ """`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of """ """`X | None`.""") from ex raise for field in dataclasses.fields(_A): if not field.init: continue _SCREAMING_SNAKE_CASE : Any = type_hints[field.name] self._parse_dataclass_field(_A , _A) def _lowerCAmelCase ( self : Any , _A : Union[str, Any]=None , _A : Union[str, Any]=False , _A : Union[str, Any]=True , _A : Union[str, Any]=None , _A : int=None , ): """simple docstring""" if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)): _SCREAMING_SNAKE_CASE : List[str] = [] if args_filename: args_files.append(Path(_A)) elif look_for_args_file and len(sys.argv): args_files.append(Path(sys.argv[0]).with_suffix(""".args""")) # args files specified via command line flag should overwrite default args files so we add them last if args_file_flag: # Create special parser just to extract the args_file_flag values _SCREAMING_SNAKE_CASE : int = ArgumentParser() args_file_parser.add_argument(_A , type=_A , action="""append""") # Use only remaining args for further parsing (remove the args_file_flag) _SCREAMING_SNAKE_CASE : Any = args_file_parser.parse_known_args(args=_A) _SCREAMING_SNAKE_CASE : List[str] = vars(_A).get(args_file_flag.lstrip("""-""") , _A) if cmd_args_file_paths: args_files.extend([Path(_A) for p in cmd_args_file_paths]) _SCREAMING_SNAKE_CASE : Optional[Any] = [] for args_file in args_files: if args_file.exists(): file_args += args_file.read_text().split() # in case of duplicate arguments the last one has precedence # args specified via the command line should overwrite args from files, so we add them last _SCREAMING_SNAKE_CASE : str = file_args + args if args is not None else file_args + sys.argv[1:] _SCREAMING_SNAKE_CASE : int = self.parse_known_args(args=_A) _SCREAMING_SNAKE_CASE : Union[str, Any] = [] for dtype in self.dataclass_types: _SCREAMING_SNAKE_CASE : Dict = {f.name for f in dataclasses.fields(_A) if f.init} _SCREAMING_SNAKE_CASE : List[str] = {k: v for k, v in vars(_A).items() if k in keys} for k in keys: delattr(_A , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = dtype(**_A) outputs.append(_A) if len(namespace.__dict__) > 0: # additional namespace. 
outputs.append(_A) if return_remaining_strings: return (*outputs, remaining_args) else: if remaining_args: raise ValueError(f"""Some specified arguments are not used by the HfArgumentParser: {remaining_args}""") return (*outputs,) def _lowerCAmelCase ( self : List[Any] , _A : Dict[str, Any] , _A : bool = False): """simple docstring""" _SCREAMING_SNAKE_CASE : str = set(args.keys()) _SCREAMING_SNAKE_CASE : Dict = [] for dtype in self.dataclass_types: _SCREAMING_SNAKE_CASE : Union[str, Any] = {f.name for f in dataclasses.fields(_A) if f.init} _SCREAMING_SNAKE_CASE : Optional[Any] = {k: v for k, v in args.items() if k in keys} unused_keys.difference_update(inputs.keys()) _SCREAMING_SNAKE_CASE : Optional[int] = dtype(**_A) outputs.append(_A) if not allow_extra_keys and unused_keys: raise ValueError(f"""Some keys are not used by the HfArgumentParser: {sorted(_A)}""") return tuple(_A) def _lowerCAmelCase ( self : Optional[int] , _A : str , _A : bool = False): """simple docstring""" with open(Path(_A) , encoding="""utf-8""") as open_json_file: _SCREAMING_SNAKE_CASE : List[str] = json.loads(open_json_file.read()) _SCREAMING_SNAKE_CASE : List[Any] = self.parse_dict(_A , allow_extra_keys=_A) return tuple(_A) def _lowerCAmelCase ( self : Dict , _A : str , _A : bool = False): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.parse_dict(yaml.safe_load(Path(_A).read_text()) , allow_extra_keys=_A) return tuple(_A)
701
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( __snake_case ): """simple docstring""" a = ["image_processor", "tokenizer"] a = "ChineseCLIPImageProcessor" a = ("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _A , ) _SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""") _SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""") if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""") super().__init__(_A , _A) _SCREAMING_SNAKE_CASE : Dict = self.image_processor def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int): """simple docstring""" if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""") if text is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A) if images is not None: _SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A) if text is not None and images is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A) , tensor_type=_A) def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any): """simple docstring""" return self.tokenizer.batch_decode(*_A , **_A) def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any): """simple docstring""" return self.tokenizer.decode(*_A , **_A) @property def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , ) return self.image_processor_class
635
0
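Assuming the parser class above is transformers' HfArgumentParser (the obfuscated identifiers hide the name), a minimal usage sketch with a made-up dataclass shows the intended flow; parse_args_into_dataclasses matches the upstream API and returns one instance per registered dataclass:

from dataclasses import dataclass, field
from typing import Optional
from transformers import HfArgumentParser

@dataclass
class ExampleArguments:
    model_name: str = field(metadata={"help": "Model identifier"})  # no default -> required argument
    learning_rate: float = field(default=5e-5, metadata={"help": "Peak learning rate"})
    fp16: Optional[bool] = field(default=False, metadata={"help": "Use mixed precision"})

parser = HfArgumentParser(ExampleArguments)
# parse from an explicit argv list instead of sys.argv
(example_args,) = parser.parse_args_into_dataclasses(args=["--model_name", "bert-base-cased", "--fp16"])
assert example_args.fp16 is True and example_args.learning_rate == 5e-5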
"""simple docstring""" import argparse import os import re import packaging.version lowerCAmelCase_ : Any = '''examples/''' lowerCAmelCase_ : Optional[Any] = { '''examples''': (re.compile(R'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''), '''init''': (re.compile(R'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''), '''setup''': (re.compile(R'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), R'''\1version="VERSION",'''), '''doc''': (re.compile(R'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''), } lowerCAmelCase_ : Optional[int] = { '''init''': '''src/transformers/__init__.py''', '''setup''': '''setup.py''', } lowerCAmelCase_ : List[Any] = '''README.md''' def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: with open(__SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _SCREAMING_SNAKE_CASE : Dict = f.read() _SCREAMING_SNAKE_CASE : Any = REPLACE_PATTERNS[pattern] _SCREAMING_SNAKE_CASE : List[Any] = replace.replace("""VERSION""" , __SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Any = re_pattern.sub(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.write(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: for folder, directories, fnames in os.walk(__SCREAMING_SNAKE_CASE ): # Removing some of the folders with non-actively maintained examples from the walk if "research_projects" in directories: directories.remove("""research_projects""" ) if "legacy" in directories: directories.remove("""legacy""" ) for fname in fnames: if fname.endswith(""".py""" ): update_version_in_file(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE , pattern="""examples""" ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False )-> Optional[int]: for pattern, fname in REPLACE_FILES.items(): update_version_in_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if not patch: update_version_in_examples(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_()-> Optional[int]: _SCREAMING_SNAKE_CASE : Tuple = """🤗 Transformers currently provides the following architectures""" _SCREAMING_SNAKE_CASE : Any = """1. Want to contribute a new model?""" with open(__SCREAMING_SNAKE_CASE , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f: _SCREAMING_SNAKE_CASE : Any = f.readlines() # Find the start of the list. _SCREAMING_SNAKE_CASE : Dict = 0 while not lines[start_index].startswith(_start_prompt ): start_index += 1 start_index += 1 _SCREAMING_SNAKE_CASE : Union[str, Any] = start_index # Update the lines in the model list. 
while not lines[index].startswith(_end_prompt ): if lines[index].startswith("""1.""" ): _SCREAMING_SNAKE_CASE : Union[str, Any] = lines[index].replace( """https://huggingface.co/docs/transformers/main/model_doc""" , """https://huggingface.co/docs/transformers/model_doc""" , ) index += 1 with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f: f.writelines(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_()-> Any: with open(REPLACE_FILES["""init"""] , """r""" ) as f: _SCREAMING_SNAKE_CASE : Dict = f.read() _SCREAMING_SNAKE_CASE : Dict = REPLACE_PATTERNS["""init"""][0].search(__SCREAMING_SNAKE_CASE ).groups()[0] return packaging.version.parse(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE=False )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Union[str, Any] = get_version() if patch and default_version.is_devrelease: raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" ) if default_version.is_devrelease: _SCREAMING_SNAKE_CASE : Any = default_version.base_version elif patch: _SCREAMING_SNAKE_CASE : Optional[Any] = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}""" else: _SCREAMING_SNAKE_CASE : Dict = F"""{default_version.major}.{default_version.minor + 1}.0""" # Now let's ask nicely if that's the right one. _SCREAMING_SNAKE_CASE : Union[str, Any] = input(F"""Which version are you releasing? [{default_version}]""" ) if len(__SCREAMING_SNAKE_CASE ) == 0: _SCREAMING_SNAKE_CASE : Any = default_version print(F"""Updating version to {version}.""" ) global_version_update(__SCREAMING_SNAKE_CASE , patch=__SCREAMING_SNAKE_CASE ) if not patch: print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() def lowerCamelCase_()-> Any: _SCREAMING_SNAKE_CASE : Optional[int] = get_version() _SCREAMING_SNAKE_CASE : Dict = F"""{current_version.major}.{current_version.minor + 1}.0.dev0""" _SCREAMING_SNAKE_CASE : Optional[int] = current_version.base_version # Check with the user we got that right. _SCREAMING_SNAKE_CASE : Optional[Any] = input(F"""Which version are we developing now? [{dev_version}]""" ) if len(__SCREAMING_SNAKE_CASE ) == 0: _SCREAMING_SNAKE_CASE : Optional[Any] = dev_version print(F"""Updating version to {version}.""" ) global_version_update(__SCREAMING_SNAKE_CASE ) print("""Cleaning main README, don't forget to run `make fix-copies`.""" ) clean_main_ref_in_model_list() if __name__ == "__main__": lowerCAmelCase_ : Optional[int] = argparse.ArgumentParser() parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''') parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''') lowerCAmelCase_ : Optional[Any] = parser.parse_args() if not args.post_release: pre_release_work(patch=args.patch) elif args.patch: print('''Nothing to do after a patch :-)''') else: post_release_work()
702
"""simple docstring""" import argparse from pathlib import Path from typing import Dict, OrderedDict, Tuple import torch from audiocraft.models import MusicGen from transformers import ( AutoFeatureExtractor, AutoTokenizer, EncodecModel, MusicgenDecoderConfig, MusicgenForConditionalGeneration, MusicgenProcessor, TaEncoderModel, ) from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM from transformers.utils import logging logging.set_verbosity_info() lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = ['''model.decoder.embed_positions.weights'''] def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: if "emb" in name: _SCREAMING_SNAKE_CASE : List[Any] = name.replace("""emb""" , """model.decoder.embed_tokens""" ) if "transformer" in name: _SCREAMING_SNAKE_CASE : List[str] = name.replace("""transformer""" , """model.decoder""" ) if "cross_attention" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""cross_attention""" , """encoder_attn""" ) if "linear1" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linear1""" , """fc1""" ) if "linear2" in name: _SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""linear2""" , """fc2""" ) if "norm1" in name: _SCREAMING_SNAKE_CASE : int = name.replace("""norm1""" , """self_attn_layer_norm""" ) if "norm_cross" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm_cross""" , """encoder_attn_layer_norm""" ) if "norm2" in name: _SCREAMING_SNAKE_CASE : Dict = name.replace("""norm2""" , """final_layer_norm""" ) if "out_norm" in name: _SCREAMING_SNAKE_CASE : Tuple = name.replace("""out_norm""" , """model.decoder.layer_norm""" ) if "linears" in name: _SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""linears""" , """lm_heads""" ) if "condition_provider.conditioners.description.output_proj" in name: _SCREAMING_SNAKE_CASE : str = name.replace("""condition_provider.conditioners.description.output_proj""" , """enc_to_dec_proj""" ) return name def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple[Dict, Dict]: _SCREAMING_SNAKE_CASE : str = list(state_dict.keys() ) _SCREAMING_SNAKE_CASE : Tuple = {} for key in keys: _SCREAMING_SNAKE_CASE : Dict = state_dict.pop(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : int = rename_keys(__SCREAMING_SNAKE_CASE ) if "in_proj_weight" in key: # split fused qkv proj _SCREAMING_SNAKE_CASE : str = val[:hidden_size, :] _SCREAMING_SNAKE_CASE : Any = val[hidden_size : 2 * hidden_size, :] _SCREAMING_SNAKE_CASE : Optional[Any] = val[-hidden_size:, :] elif "enc_to_dec_proj" in key: _SCREAMING_SNAKE_CASE : int = val else: _SCREAMING_SNAKE_CASE : Dict = val return state_dict, enc_dec_proj_state_dict def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> MusicgenDecoderConfig: if checkpoint == "small": # default config values _SCREAMING_SNAKE_CASE : Optional[Any] = 1_024 _SCREAMING_SNAKE_CASE : str = 24 _SCREAMING_SNAKE_CASE : Any = 16 elif checkpoint == "medium": _SCREAMING_SNAKE_CASE : Dict = 1_536 _SCREAMING_SNAKE_CASE : Union[str, Any] = 48 _SCREAMING_SNAKE_CASE : Optional[Any] = 24 elif checkpoint == "large": _SCREAMING_SNAKE_CASE : List[Any] = 2_048 _SCREAMING_SNAKE_CASE : Optional[int] = 48 _SCREAMING_SNAKE_CASE : str = 32 else: raise ValueError(F"""Checkpoint should be one of `['small', 'medium', 'large']`, got {checkpoint}.""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = MusicgenDecoderConfig( hidden_size=__SCREAMING_SNAKE_CASE , ffn_dim=hidden_size * 4 , num_hidden_layers=__SCREAMING_SNAKE_CASE , 
num_attention_heads=__SCREAMING_SNAKE_CASE , ) return config @torch.no_grad() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE="cpu" )-> str: _SCREAMING_SNAKE_CASE : str = MusicGen.get_pretrained(__SCREAMING_SNAKE_CASE , device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[str] = decoder_config_from_checkpoint(__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = fairseq_model.lm.state_dict() _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[str] = rename_state_dict( __SCREAMING_SNAKE_CASE , hidden_size=decoder_config.hidden_size ) _SCREAMING_SNAKE_CASE : Tuple = TaEncoderModel.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[Any] = EncodecModel.from_pretrained("""facebook/encodec_32khz""" ) _SCREAMING_SNAKE_CASE : str = MusicgenForCausalLM(__SCREAMING_SNAKE_CASE ).eval() # load all decoder weights - expect that we'll be missing embeddings and enc-dec projection _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : str = decoder.load_state_dict(__SCREAMING_SNAKE_CASE , strict=__SCREAMING_SNAKE_CASE ) for key in missing_keys.copy(): if key.startswith(("""text_encoder""", """audio_encoder""") ) or key in EXPECTED_MISSING_KEYS: missing_keys.remove(__SCREAMING_SNAKE_CASE ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Missing key(s) in state_dict: {missing_keys}""" ) if len(__SCREAMING_SNAKE_CASE ) > 0: raise ValueError(F"""Unexpected key(s) in state_dict: {unexpected_keys}""" ) # init the composite model _SCREAMING_SNAKE_CASE : Dict = MusicgenForConditionalGeneration(text_encoder=__SCREAMING_SNAKE_CASE , audio_encoder=__SCREAMING_SNAKE_CASE , decoder=__SCREAMING_SNAKE_CASE ) # load the pre-trained enc-dec projection (from the decoder state dict) model.enc_to_dec_proj.load_state_dict(__SCREAMING_SNAKE_CASE ) # check we can do a forward pass _SCREAMING_SNAKE_CASE : Optional[Any] = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 ) _SCREAMING_SNAKE_CASE : Dict = input_ids.reshape(2 * 4 , -1 ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : Optional[int] = model(input_ids=__SCREAMING_SNAKE_CASE , decoder_input_ids=__SCREAMING_SNAKE_CASE ).logits if logits.shape != (8, 1, 2_048): raise ValueError("""Incorrect shape for logits""" ) # now construct the processor _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""t5-base""" ) _SCREAMING_SNAKE_CASE : List[str] = AutoFeatureExtractor.from_pretrained("""facebook/encodec_32khz""" , padding_side="""left""" ) _SCREAMING_SNAKE_CASE : Optional[int] = MusicgenProcessor(feature_extractor=__SCREAMING_SNAKE_CASE , tokenizer=__SCREAMING_SNAKE_CASE ) # set the appropriate bos/pad token ids _SCREAMING_SNAKE_CASE : Optional[Any] = 2_048 _SCREAMING_SNAKE_CASE : List[Any] = 2_048 # set other default generation config params _SCREAMING_SNAKE_CASE : Any = int(30 * audio_encoder.config.frame_rate ) _SCREAMING_SNAKE_CASE : Tuple = True _SCREAMING_SNAKE_CASE : int = 3.0 if pytorch_dump_folder is not None: Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE ) logger.info(F"""Saving model {checkpoint} to {pytorch_dump_folder}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) processor.save_pretrained(__SCREAMING_SNAKE_CASE ) if repo_id: logger.info(F"""Pushing model {checkpoint} to {repo_id}""" ) model.push_to_hub(__SCREAMING_SNAKE_CASE ) processor.push_to_hub(__SCREAMING_SNAKE_CASE ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--checkpoint''', 
default='''small''', type=str, help='''Checkpoint size of the MusicGen model you\'d like to convert. Can be one of: `[\'small\', \'medium\', \'large\']`.''', ) parser.add_argument( '''--pytorch_dump_folder''', required=True, default=None, type=str, help='''Path to the output PyTorch model directory.''', ) parser.add_argument( '''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.''' ) parser.add_argument( '''--device''', default='''cpu''', type=str, help='''Torch device to run the conversion, either cpu or cuda.''' ) lowerCAmelCase_ = parser.parse_args() convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
635
0
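The release script above bumps version strings by regex substitution driven by the REPLACE_PATTERNS table. A small self-contained sketch of that substitution step, reusing the `init` pattern on made-up file contents:

import re

re_init = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
template = '__version__ = "VERSION"\n'

sample = 'release = "stub"\n__version__ = "4.30.0.dev0"\n'
bumped = re_init.sub(template.replace("VERSION", "4.30.0"), sample)
assert '__version__ = "4.30.0"' in bumped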
from collections import namedtuple lowerCAmelCase_ = namedtuple('''from_to''', '''from_ to''') lowerCAmelCase_ = { '''cubicmeter''': from_to(1, 1), '''litre''': from_to(0.0_01, 1000), '''kilolitre''': from_to(1, 1), '''gallon''': from_to(0.0_04_54, 264.172), '''cubicyard''': from_to(0.7_64_55, 1.3_07_95), '''cubicfoot''': from_to(0.0_28, 35.31_47), '''cup''': from_to(0.0_00_23_65_88, 4226.75), } def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> float: if from_type not in METRIC_CONVERSION: raise ValueError( F"""Invalid 'from_type' value: {from_type!r}. Supported values are:\n""" + """, """.join(METRIC_CONVERSION ) ) if to_type not in METRIC_CONVERSION: raise ValueError( F"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n""" + """, """.join(METRIC_CONVERSION ) ) return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to if __name__ == "__main__": import doctest doctest.testmod()
703
"""simple docstring""" import functools import operator from ...configuration_utils import PretrainedConfig from ...utils import logging lowerCAmelCase_ = logging.get_logger(__name__) lowerCAmelCase_ = { '''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''', # See all SEW models at https://huggingface.co/models?filter=sew } class _snake_case ( __snake_case ): """simple docstring""" a = "sew" def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ): """simple docstring""" super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A) _SCREAMING_SNAKE_CASE : str = hidden_size _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm _SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation _SCREAMING_SNAKE_CASE : Dict = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : int = list(_A) _SCREAMING_SNAKE_CASE : str = conv_bias _SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings _SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups _SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim) _SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers _SCREAMING_SNAKE_CASE : List[str] = intermediate_size _SCREAMING_SNAKE_CASE : str = squeeze_factor _SCREAMING_SNAKE_CASE : Dict = hidden_act _SCREAMING_SNAKE_CASE : str = num_attention_heads _SCREAMING_SNAKE_CASE : Dict = hidden_dropout _SCREAMING_SNAKE_CASE : Tuple = attention_dropout _SCREAMING_SNAKE_CASE : int = activation_dropout _SCREAMING_SNAKE_CASE : Any = feat_proj_dropout _SCREAMING_SNAKE_CASE : str = final_dropout _SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop _SCREAMING_SNAKE_CASE : Any = layer_norm_eps _SCREAMING_SNAKE_CASE : int = initializer_range _SCREAMING_SNAKE_CASE : List[Any] = vocab_size if ( (len(self.conv_stride) != self.num_feat_extract_layers) or (len(self.conv_kernel) != self.num_feat_extract_layers) or (len(self.conv_dim) != self.num_feat_extract_layers) ): raise ValueError( """Configuration for convolutional layers is incorrect.""" """It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,""" f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)""" f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""") # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779 _SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment _SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob _SCREAMING_SNAKE_CASE : List[str] = mask_time_length _SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks 
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob _SCREAMING_SNAKE_CASE : int = mask_feature_length _SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks # ctc loss _SCREAMING_SNAKE_CASE : int = ctc_loss_reduction _SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity # sequence classification _SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum _SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size @property def _lowerCAmelCase ( self : Any): """simple docstring""" return functools.reduce(operator.mul , self.conv_stride , 1)
635
0
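A quick usage check of the volume converter above (the function is bound to an obfuscated name in this listing; `volume_conversion` below is an assumed alias for it). The expected values follow directly from the conversion table: multiply by the source type's cubic-metre factor, then by the target type's units-per-cubic-metre factor.

print(volume_conversion(4, "cubicmeter", "litre"))   # 4000.0   (4 * 1 * 1000)
print(volume_conversion(1, "litre", "gallon"))       # 0.264172 (1 * 0.001 * 264.172)
print(volume_conversion(3, "gallon", "cubicyard"))   # ~0.0178143 (3 * 0.00454 * 1.30795)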
"""simple docstring""" import logging import os from dataclasses import dataclass, field from functools import partial from pathlib import Path from tempfile import TemporaryDirectory from typing import List, Optional import faiss import torch from datasets import Features, Sequence, Value, load_dataset from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser lowerCAmelCase_ = logging.getLogger(__name__) torch.set_grad_enabled(False) lowerCAmelCase_ = '''cuda''' if torch.cuda.is_available() else '''cpu''' def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=" " )-> List[str]: _SCREAMING_SNAKE_CASE : Optional[int] = text.split(__SCREAMING_SNAKE_CASE ) return [character.join(text[i : i + n] ).strip() for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )] def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> dict: _SCREAMING_SNAKE_CASE : Union[str, Any] = [], [] for title, text in zip(documents["""title"""] , documents["""text"""] ): if text is not None: for passage in split_text(__SCREAMING_SNAKE_CASE ): titles.append(title if title is not None else """""" ) texts.append(__SCREAMING_SNAKE_CASE ) return {"title": titles, "text": texts} def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> dict: _SCREAMING_SNAKE_CASE : str = ctx_tokenizer( documents["""title"""] , documents["""text"""] , truncation=__SCREAMING_SNAKE_CASE , padding="""longest""" , return_tensors="""pt""" )["""input_ids"""] _SCREAMING_SNAKE_CASE : List[str] = ctx_encoder(input_ids.to(device=__SCREAMING_SNAKE_CASE ) , return_dict=__SCREAMING_SNAKE_CASE ).pooler_output return {"embeddings": embeddings.detach().cpu().numpy()} def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> Optional[int]: ###################################### logger.info("""Step 1 - Create the dataset""" ) ###################################### # The dataset needed for RAG must have three columns: # - title (string): title of the document # - text (string): text of a passage of the document # - embeddings (array of dimension d): DPR representation of the passage # Let's say you have documents in tab-separated csv files with columns "title" and "text" assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file" # You can load a Dataset object this way _SCREAMING_SNAKE_CASE : List[Any] = load_dataset( """csv""" , data_files=[rag_example_args.csv_path] , split="""train""" , delimiter="""\t""" , column_names=["""title""", """text"""] ) # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files # Then split the documents into passages of 100 words _SCREAMING_SNAKE_CASE : int = dataset.map(__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , num_proc=processing_args.num_proc ) # And compute the embeddings _SCREAMING_SNAKE_CASE : Tuple = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Union[str, Any] = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ) _SCREAMING_SNAKE_CASE : Dict = Features( {"""text""": Value("""string""" ), """title""": Value("""string""" ), """embeddings""": Sequence(Value("""float32""" ) )} ) # optional, save as float32 instead of float64 to save space _SCREAMING_SNAKE_CASE : Dict = dataset.map( 
partial(__SCREAMING_SNAKE_CASE , ctx_encoder=__SCREAMING_SNAKE_CASE , ctx_tokenizer=__SCREAMING_SNAKE_CASE ) , batched=__SCREAMING_SNAKE_CASE , batch_size=processing_args.batch_size , features=__SCREAMING_SNAKE_CASE , ) # And finally save your dataset _SCREAMING_SNAKE_CASE : Union[str, Any] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset""" ) dataset.save_to_disk(__SCREAMING_SNAKE_CASE ) # from datasets import load_from_disk # dataset = load_from_disk(passages_path) # to reload the dataset ###################################### logger.info("""Step 2 - Index the dataset""" ) ###################################### # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search _SCREAMING_SNAKE_CASE : Any = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT ) dataset.add_faiss_index("""embeddings""" , custom_index=__SCREAMING_SNAKE_CASE ) # And save the index _SCREAMING_SNAKE_CASE : List[str] = os.path.join(rag_example_args.output_dir , """my_knowledge_dataset_hnsw_index.faiss""" ) dataset.get_index("""embeddings""" ).save(__SCREAMING_SNAKE_CASE ) # dataset.load_faiss_index("embeddings", index_path) # to reload the index @dataclass class _snake_case : """simple docstring""" a = field( default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv" ) , metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"} , ) a = field( default=__snake_case , metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."} , ) a = field( default="facebook/rag-sequence-nq" , metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"} , ) a = field( default="facebook/dpr-ctx_encoder-multiset-base" , metadata={ "help": ( "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or" " 'facebook/dpr-ctx_encoder-multiset-base'" ) } , ) a = field( default=str(Path(__snake_case ).parent / "test_run" / "dummy-kb" ) , metadata={"help": "Path to a directory where the dataset passages and the index will be saved"} , ) @dataclass class _snake_case : """simple docstring""" a = field( default=__snake_case , metadata={ "help": "The number of processes to use to split the documents into passages. Default is single process." } , ) a = field( default=16 , metadata={ "help": "The batch size to use when computing the passages embeddings using the DPR context encoder." } , ) @dataclass class _snake_case : """simple docstring""" a = field( default=7_68 , metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."} , ) a = field( default=1_28 , metadata={ "help": ( "The number of bi-directional links created for every new element during the HNSW index construction." ) } , ) if __name__ == "__main__": logging.basicConfig(level=logging.WARNING) logger.setLevel(logging.INFO) lowerCAmelCase_ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments)) lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = parser.parse_args_into_dataclasses() with TemporaryDirectory() as tmp_dir: lowerCAmelCase_ = rag_example_args.output_dir or tmp_dir main(rag_example_args, processing_args, index_hnsw_args)
704
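Once the script above has written the passage dataset and its HNSW index, both can be reloaded and queried with a DPR question encoder. A hedged sketch, assuming the script's default output names and the standard public DPR question-encoder checkpoint (neither is pinned down by the file itself):

import torch
from datasets import load_from_disk
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizerFast

# Paths follow the script's defaults inside rag_example_args.output_dir (assumption).
dataset = load_from_disk("my_knowledge_dataset")
dataset.load_faiss_index("embeddings", "my_knowledge_dataset_hnsw_index.faiss")

name = "facebook/dpr-question_encoder-single-nq-base"  # assumed checkpoint
q_encoder = DPRQuestionEncoder.from_pretrained(name)
q_tokenizer = DPRQuestionEncoderTokenizerFast.from_pretrained(name)

with torch.no_grad():
    inputs = q_tokenizer("What does Moses' rod turn into ?", return_tensors="pt")
    question_emb = q_encoder(**inputs).pooler_output
scores, retrieved = dataset.get_nearest_examples("embeddings", question_emb.numpy()[0], k=5)
print(retrieved["title"])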
"""simple docstring""" from typing import TYPE_CHECKING from ...utils import ( OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_tf_available, is_torch_available, ) lowerCAmelCase_ = {'''configuration_unispeech''': ['''UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''UniSpeechConfig''']} try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: lowerCAmelCase_ = [ '''UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST''', '''UniSpeechForCTC''', '''UniSpeechForPreTraining''', '''UniSpeechForSequenceClassification''', '''UniSpeechModel''', '''UniSpeechPreTrainedModel''', ] if TYPE_CHECKING: from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig try: if not is_torch_available(): raise OptionalDependencyNotAvailable() except OptionalDependencyNotAvailable: pass else: from .modeling_unispeech import ( UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST, UniSpeechForCTC, UniSpeechForPreTraining, UniSpeechForSequenceClassification, UniSpeechModel, UniSpeechPreTrainedModel, ) else: import sys lowerCAmelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
635
0
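The UniSpeech __init__ above follows the standard transformers lazy-import pattern: at type-checking time symbols are imported eagerly, while at runtime the module object is replaced in sys.modules by a _LazyModule that resolves attributes from _import_structure on first access. The observable effect, sketched under the assumption that the module is wired as in upstream transformers:

# Importing the symbol is what triggers the real import; until then the
# package only holds the _import_structure mapping.
from transformers.models.unispeech import UniSpeechConfig

config = UniSpeechConfig()   # default configuration object
print(config.model_type)     # "unispeech"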
"""simple docstring""" import warnings from ...processing_utils import ProcessorMixin from ...tokenization_utils_base import BatchEncoding class _snake_case ( __snake_case ): """simple docstring""" a = ["image_processor", "tokenizer"] a = "ChineseCLIPImageProcessor" a = ("BertTokenizer", "BertTokenizerFast") def __init__( self : Dict , _A : Tuple=None , _A : List[Any]=None , **_A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = None if "feature_extractor" in kwargs: warnings.warn( """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`""" """ instead.""" , _A , ) _SCREAMING_SNAKE_CASE : str = kwargs.pop("""feature_extractor""") _SCREAMING_SNAKE_CASE : int = image_processor if image_processor is not None else feature_extractor if image_processor is None: raise ValueError("""You need to specify an `image_processor`.""") if tokenizer is None: raise ValueError("""You need to specify a `tokenizer`.""") super().__init__(_A , _A) _SCREAMING_SNAKE_CASE : Dict = self.image_processor def __call__( self : Optional[int] , _A : Optional[Any]=None , _A : Any=None , _A : Tuple=None , **_A : int): """simple docstring""" if text is None and images is None: raise ValueError("""You have to specify either text or images. Both cannot be none.""") if text is not None: _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(_A , return_tensors=_A , **_A) if images is not None: _SCREAMING_SNAKE_CASE : List[Any] = self.image_processor(_A , return_tensors=_A , **_A) if text is not None and images is not None: _SCREAMING_SNAKE_CASE : Union[str, Any] = image_features.pixel_values return encoding elif text is not None: return encoding else: return BatchEncoding(data=dict(**_A) , tensor_type=_A) def _lowerCAmelCase ( self : str , *_A : Any , **_A : Any): """simple docstring""" return self.tokenizer.batch_decode(*_A , **_A) def _lowerCAmelCase ( self : Union[str, Any] , *_A : List[Any] , **_A : Any): """simple docstring""" return self.tokenizer.decode(*_A , **_A) @property def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer.model_input_names _SCREAMING_SNAKE_CASE : Union[str, Any] = self.image_processor.model_input_names return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names)) @property def _lowerCAmelCase ( self : List[str]): """simple docstring""" warnings.warn( """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , _A , ) return self.image_processor_class
705
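A hedged usage sketch for the ChineseCLIP processor above; the checkpoint name is the public Chinese-CLIP release and the image path is a placeholder, neither appears in the file:

from PIL import Image
from transformers import ChineseCLIPProcessor

processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
image = Image.open("cat.png")  # any local RGB image (placeholder path)

# Tokenizes the text and preprocesses the image in one call, as in __call__ above.
inputs = processor(text=["一只猫"], images=image, return_tensors="pt")
print(inputs.keys())  # input_ids, token_type_ids, attention_mask, pixel_values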
"""simple docstring""" import pyarrow.parquet as pq import pytest from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config from datasets.features.image import Image from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : int = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : str = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[Any] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : List[Any] = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : Optional[Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : Tuple = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : int = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Dict = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE , split=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) assert dataset.split == split if split else "train" @pytest.mark.parametrize("""path_type""" , [str, list] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> str: if issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Any = parquet_path elif 
issubclass(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Union[str, Any] = [parquet_path] _SCREAMING_SNAKE_CASE : Optional[Any] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : str = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_dataset(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=("train",) )-> Union[str, Any]: assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for split in splits: _SCREAMING_SNAKE_CASE : int = dataset_dict[split] assert dataset.num_rows == 4 assert dataset.num_columns == 3 assert dataset.column_names == ["col_1", "col_2", "col_3"] for feature, expected_dtype in expected_features.items(): assert dataset.features[feature].dtype == expected_dtype @pytest.mark.parametrize("""keep_in_memory""" , [False, True] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : Dict = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase(): _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetReader( {"""train""": parquet_path} , cache_dir=__SCREAMING_SNAKE_CASE , keep_in_memory=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize( """features""" , [ None, {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}, {"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""}, {"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""}, {"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""}, ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: _SCREAMING_SNAKE_CASE : Optional[int] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : Optional[Any] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : List[str] = features.copy() if features else default_expected_features _SCREAMING_SNAKE_CASE : str = ( Features({feature: Value(__SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None ) _SCREAMING_SNAKE_CASE : int = ParquetDatasetReader({"""train""": parquet_path} , features=__SCREAMING_SNAKE_CASE , cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) @pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: if split: _SCREAMING_SNAKE_CASE : Union[str, Any] = {split: parquet_path} else: _SCREAMING_SNAKE_CASE : Optional[int] = """train""" _SCREAMING_SNAKE_CASE : Any = {"""train""": parquet_path, """test""": parquet_path} _SCREAMING_SNAKE_CASE : List[str] = tmp_path / """cache""" _SCREAMING_SNAKE_CASE : List[str] = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""} _SCREAMING_SNAKE_CASE : Union[str, Any] = ParquetDatasetReader(__SCREAMING_SNAKE_CASE , 
cache_dir=__SCREAMING_SNAKE_CASE ).read() _check_parquet_datasetdict(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , splits=list(path.keys() ) ) assert all(dataset[split].split == split for split in path.keys() ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : List[str] = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : Tuple = pq.ParquetFile(tmp_path / """foo.parquet""" ) _SCREAMING_SNAKE_CASE : str = pf.read() assert dataset.data.table == output_table def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Union[str, Any]: _SCREAMING_SNAKE_CASE : Dict = str(shared_datadir / """test_image_rgb.jpg""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = {"""image""": [image_path]} _SCREAMING_SNAKE_CASE : Optional[Any] = Features({"""image""": Image()} ) _SCREAMING_SNAKE_CASE : Optional[int] = Dataset.from_dict(__SCREAMING_SNAKE_CASE , features=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Tuple = ParquetDatasetWriter(__SCREAMING_SNAKE_CASE , tmp_path / """foo.parquet""" ) assert writer.write() > 0 _SCREAMING_SNAKE_CASE : List[str] = Dataset.from_parquet(str(tmp_path / """foo.parquet""" ) ) assert dataset.features == reloaded_dataset.features _SCREAMING_SNAKE_CASE : List[Any] = ParquetDatasetReader(str(tmp_path / """foo.parquet""" ) , streaming=__SCREAMING_SNAKE_CASE ).read() assert dataset.features == reloaded_iterable_dataset.features @pytest.mark.parametrize( """feature, expected""" , [ (Features({"""foo""": Value("""int32""" )} ), None), (Features({"""image""": Image(), """foo""": Value("""int32""" )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS), (Features({"""nested""": Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS), ] , ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> int: assert get_writer_batch_size(__SCREAMING_SNAKE_CASE ) == expected
635
0
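The tests above exercise round trips through ParquetDatasetReader and ParquetDatasetWriter; a minimal sketch of the same pattern outside the test harness (the file name is illustrative):

from datasets import Dataset
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter

ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [0, 1]})
ParquetDatasetWriter(ds, "data.parquet").write()        # returns number of bytes written

reloaded = ParquetDatasetReader("data.parquet").read()  # back to a Dataset
assert reloaded.column_names == ["col_1", "col_2"]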
"""simple docstring""" from dataclasses import dataclass from typing import Optional, Tuple, Union import flax import jax import jax.numpy as jnp from ..configuration_utils import ConfigMixin, register_to_config from .scheduling_utils_flax import ( CommonSchedulerState, FlaxKarrasDiffusionSchedulers, FlaxSchedulerMixin, FlaxSchedulerOutput, add_noise_common, get_velocity_common, ) @flax.struct.dataclass class _snake_case : """simple docstring""" a = 42 # setable values a = 42 a = 42 a = None @classmethod def _lowerCAmelCase ( cls : str , _A : CommonSchedulerState , _A : jnp.ndarray , _A : jnp.ndarray): """simple docstring""" return cls(common=_A , init_noise_sigma=_A , timesteps=_A) @dataclass class _snake_case ( __snake_case ): """simple docstring""" a = 42 class _snake_case ( __snake_case , __snake_case ): """simple docstring""" a = [e.name for e in FlaxKarrasDiffusionSchedulers] a = 42 @property def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" return True @register_to_config def __init__( self : Optional[int] , _A : int = 1_0_0_0 , _A : float = 0.0_001 , _A : float = 0.02 , _A : str = "linear" , _A : Optional[jnp.ndarray] = None , _A : str = "fixed_small" , _A : bool = True , _A : str = "epsilon" , _A : jnp.dtype = jnp.floataa , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = dtype def _lowerCAmelCase ( self : Dict , _A : Optional[CommonSchedulerState] = None): """simple docstring""" if common is None: _SCREAMING_SNAKE_CASE : Any = CommonSchedulerState.create(self) # standard deviation of the initial noise distribution _SCREAMING_SNAKE_CASE : Any = jnp.array(1.0 , dtype=self.dtype) _SCREAMING_SNAKE_CASE : int = jnp.arange(0 , self.config.num_train_timesteps).round()[::-1] return DDPMSchedulerState.create( common=_A , init_noise_sigma=_A , timesteps=_A , ) def _lowerCAmelCase ( self : List[str] , _A : DDPMSchedulerState , _A : jnp.ndarray , _A : Optional[int] = None): """simple docstring""" return sample def _lowerCAmelCase ( self : Any , _A : DDPMSchedulerState , _A : int , _A : Tuple = ()): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = self.config.num_train_timesteps // num_inference_steps # creates integer timesteps by multiplying by ratio # rounding to avoid issues when num_inference_step is power of 3 _SCREAMING_SNAKE_CASE : List[str] = (jnp.arange(0 , _A) * step_ratio).round()[::-1] return state.replace( num_inference_steps=_A , timesteps=_A , ) def _lowerCAmelCase ( self : Dict , _A : DDPMSchedulerState , _A : Union[str, Any] , _A : Tuple=None , _A : List[Any]=None): """simple docstring""" _SCREAMING_SNAKE_CASE : Union[str, Any] = state.common.alphas_cumprod[t] _SCREAMING_SNAKE_CASE : Optional[int] = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype)) # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf) # and sample from it to get previous sample # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample _SCREAMING_SNAKE_CASE : Union[str, Any] = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t] if variance_type is None: _SCREAMING_SNAKE_CASE : Any = self.config.variance_type # hacks - were probably added for training stability if variance_type == "fixed_small": _SCREAMING_SNAKE_CASE : int = jnp.clip(_A , a_min=1e-20) # for rl-diffuser https://arxiv.org/abs/2205.09991 elif variance_type == "fixed_small_log": _SCREAMING_SNAKE_CASE : str = jnp.log(jnp.clip(_A , a_min=1e-20)) elif variance_type == 
"fixed_large": _SCREAMING_SNAKE_CASE : Union[str, Any] = state.common.betas[t] elif variance_type == "fixed_large_log": # Glide max_log _SCREAMING_SNAKE_CASE : Tuple = jnp.log(state.common.betas[t]) elif variance_type == "learned": return predicted_variance elif variance_type == "learned_range": _SCREAMING_SNAKE_CASE : int = variance _SCREAMING_SNAKE_CASE : int = state.common.betas[t] _SCREAMING_SNAKE_CASE : int = (predicted_variance + 1) / 2 _SCREAMING_SNAKE_CASE : Dict = frac * max_log + (1 - frac) * min_log return variance def _lowerCAmelCase ( self : Dict , _A : DDPMSchedulerState , _A : jnp.ndarray , _A : int , _A : jnp.ndarray , _A : Optional[jax.random.KeyArray] = None , _A : bool = True , ): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = timestep if key is None: _SCREAMING_SNAKE_CASE : Tuple = jax.random.PRNGKey(0) if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]: _SCREAMING_SNAKE_CASE : List[str] = jnp.split(_A , sample.shape[1] , axis=1) else: _SCREAMING_SNAKE_CASE : Dict = None # 1. compute alphas, betas _SCREAMING_SNAKE_CASE : Optional[Any] = state.common.alphas_cumprod[t] _SCREAMING_SNAKE_CASE : Tuple = jnp.where(t > 0 , state.common.alphas_cumprod[t - 1] , jnp.array(1.0 , dtype=self.dtype)) _SCREAMING_SNAKE_CASE : Optional[int] = 1 - alpha_prod_t _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 - alpha_prod_t_prev # 2. compute predicted original sample from predicted noise also called # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf if self.config.prediction_type == "epsilon": _SCREAMING_SNAKE_CASE : Union[str, Any] = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5 elif self.config.prediction_type == "sample": _SCREAMING_SNAKE_CASE : str = model_output elif self.config.prediction_type == "v_prediction": _SCREAMING_SNAKE_CASE : Dict = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output else: raise ValueError( f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """ """ for the FlaxDDPMScheduler.""") # 3. Clip "predicted x_0" if self.config.clip_sample: _SCREAMING_SNAKE_CASE : int = jnp.clip(_A , -1 , 1) # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _SCREAMING_SNAKE_CASE : int = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t _SCREAMING_SNAKE_CASE : List[str] = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t # 5. Compute predicted previous sample µ_t # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf _SCREAMING_SNAKE_CASE : List[str] = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample # 6. 
Add noise def random_variance(): _SCREAMING_SNAKE_CASE : Optional[Any] = jax.random.split(_A , num=1) _SCREAMING_SNAKE_CASE : str = jax.random.normal(_A , shape=model_output.shape , dtype=self.dtype) return (self._get_variance(_A , _A , predicted_variance=_A) ** 0.5) * noise _SCREAMING_SNAKE_CASE : Optional[Any] = jnp.where(t > 0 , random_variance() , jnp.zeros(model_output.shape , dtype=self.dtype)) _SCREAMING_SNAKE_CASE : List[str] = pred_prev_sample + variance if not return_dict: return (pred_prev_sample, state) return FlaxDDPMSchedulerOutput(prev_sample=_A , state=_A) def _lowerCAmelCase ( self : Optional[Any] , _A : DDPMSchedulerState , _A : jnp.ndarray , _A : jnp.ndarray , _A : jnp.ndarray , ): """simple docstring""" return add_noise_common(state.common , _A , _A , _A) def _lowerCAmelCase ( self : Any , _A : DDPMSchedulerState , _A : jnp.ndarray , _A : jnp.ndarray , _A : jnp.ndarray , ): """simple docstring""" return get_velocity_common(state.common , _A , _A , _A) def __len__( self : Union[str, Any]): """simple docstring""" return self.config.num_train_timesteps
706
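The Flax scheduler above is functionally pure: every mutable quantity lives in the scheduler state and each call returns a new state. A hedged sketch of a denoising loop against this API; unet_apply stands in for a real Flax UNet and is purely hypothetical, and the sample shape is illustrative:

import jax
import jax.numpy as jnp
from diffusers import FlaxDDPMScheduler

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)

key = jax.random.PRNGKey(0)
sample = jax.random.normal(key, (1, 64, 64, 3))  # initial noise

for t in state.timesteps:
    model_output = unet_apply(sample, t)  # hypothetical Flax UNet call
    key, step_key = jax.random.split(key)
    out = scheduler.step(state, model_output, t, sample, key=step_key)
    sample, state = out.prev_sample, out.state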
"""simple docstring""" def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> int: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""only integers accepted as input""" ) else: _SCREAMING_SNAKE_CASE : List[Any] = str(abs(__SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : List[str] = [list(__SCREAMING_SNAKE_CASE ) for char in range(len(__SCREAMING_SNAKE_CASE ) )] for index in range(len(__SCREAMING_SNAKE_CASE ) ): num_transpositions[index].pop(__SCREAMING_SNAKE_CASE ) return max( int("""""".join(list(__SCREAMING_SNAKE_CASE ) ) ) for transposition in num_transpositions ) if __name__ == "__main__": __import__('''doctest''').testmod()
635
0
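A worked example for the function above, which returns the largest number obtainable by deleting exactly one digit: for 386 the candidates are 86, 36 and 38, so the answer is 86, and abs() makes -386 give the same result. In code (remove_digit is an illustrative alias; the file binds the function to lowerCamelCase_):

# remove_digit is an illustrative alias for the function above.
assert remove_digit(386) == 86    # candidates: 86, 36, 38
assert remove_digit(-386) == 86   # abs() drops the sign
assert remove_digit(11) == 1      # deleting either digit leaves 1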
"""simple docstring""" import gzip import hashlib import json import multiprocessing import os import re import shutil import time from pathlib import Path import numpy as np from arguments import PreprocessingArguments from datasets import load_dataset from minhash_deduplication import deduplicate_dataset from transformers import AutoTokenizer, HfArgumentParser lowerCAmelCase_ = re.compile(R'''\s+''') def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Union[str, Any]: return {"hash": hashlib.mda(re.sub(__SCREAMING_SNAKE_CASE , """""" , example["""content"""] ).encode("""utf-8""" ) ).hexdigest()} def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Tuple: _SCREAMING_SNAKE_CASE : Optional[Any] = [len(__SCREAMING_SNAKE_CASE ) for line in example["""content"""].splitlines()] return {"line_mean": np.mean(__SCREAMING_SNAKE_CASE ), "line_max": max(__SCREAMING_SNAKE_CASE )} def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Dict: _SCREAMING_SNAKE_CASE : List[Any] = np.mean([c.isalnum() for c in example["""content"""]] ) return {"alpha_frac": alpha_frac} def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[str]: if example["hash"] in uniques: uniques.remove(example["""hash"""] ) return True else: return False def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 )-> Any: _SCREAMING_SNAKE_CASE : Any = ["""auto-generated""", """autogenerated""", """automatically generated"""] _SCREAMING_SNAKE_CASE : Union[str, Any] = example["""content"""].splitlines() for _, line in zip(range(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ): for keyword in keywords: if keyword in line.lower(): return {"autogenerated": True} else: return {"autogenerated": False} def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=5 , __SCREAMING_SNAKE_CASE=0.05 )-> Optional[int]: _SCREAMING_SNAKE_CASE : List[Any] = ["""unit tests""", """test file""", """configuration file"""] _SCREAMING_SNAKE_CASE : Optional[int] = example["""content"""].splitlines() _SCREAMING_SNAKE_CASE : Dict = 0 _SCREAMING_SNAKE_CASE : Union[str, Any] = 0 # first test for _, line in zip(range(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE ): for keyword in keywords: if keyword in line.lower(): return {"config_or_test": True} # second test _SCREAMING_SNAKE_CASE : List[Any] = example["""content"""].count("""\n""" ) _SCREAMING_SNAKE_CASE : Optional[Any] = int(coeff * nlines ) for line in lines: count_config += line.lower().count("""config""" ) count_test += line.lower().count("""test""" ) if count_config > threshold or count_test > threshold: return {"config_or_test": True} return {"config_or_test": False} def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: _SCREAMING_SNAKE_CASE : Any = ["""def """, """class """, """for """, """while """] _SCREAMING_SNAKE_CASE : List[str] = example["""content"""].splitlines() for line in lines: for keyword in keywords: if keyword in line.lower(): return {"has_no_keywords": False} return {"has_no_keywords": True} def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=4 )-> Dict: _SCREAMING_SNAKE_CASE : Any = example["""content"""].splitlines() _SCREAMING_SNAKE_CASE : Tuple = 0 for line in lines: counter += line.lower().count("""=""" ) if counter > minimum: return {"has_few_assignments": False} return {"has_few_assignments": True} def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> List[Any]: _SCREAMING_SNAKE_CASE : int = tokenizer(example["""content"""] , truncation=__SCREAMING_SNAKE_CASE )["""input_ids"""] _SCREAMING_SNAKE_CASE : int = len(example["""content"""] ) / 
len(__SCREAMING_SNAKE_CASE ) return {"ratio": ratio} def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: _SCREAMING_SNAKE_CASE : List[str] = {} results.update(get_hash(__SCREAMING_SNAKE_CASE ) ) results.update(line_stats(__SCREAMING_SNAKE_CASE ) ) results.update(alpha_stats(__SCREAMING_SNAKE_CASE ) ) results.update(char_token_ratio(__SCREAMING_SNAKE_CASE ) ) results.update(is_autogenerated(__SCREAMING_SNAKE_CASE ) ) results.update(is_config_or_test(__SCREAMING_SNAKE_CASE ) ) results.update(has_no_keywords(__SCREAMING_SNAKE_CASE ) ) results.update(has_few_assignments(__SCREAMING_SNAKE_CASE ) ) return results def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: if not check_uniques(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): return False elif example["autogenerated"]: return False elif example["line_max"] > args.line_max: return False elif example["line_mean"] > args.line_mean: return False elif example["alpha_frac"] < args.alpha_frac: return False elif example["ratio"] < args.min_token_ratio: return False elif example["config_or_test"] and np.random.rand() <= args.filter_proba: return False elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba: return False elif example["has_few_assignments"]: return False else: return True def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: with open(__SCREAMING_SNAKE_CASE , """rb""" ) as f_in: with gzip.open(str(__SCREAMING_SNAKE_CASE ) + """.gz""" , """wb""" , compresslevel=6 ) as f_out: shutil.copyfileobj(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) os.unlink(__SCREAMING_SNAKE_CASE ) # Settings lowerCAmelCase_ = HfArgumentParser(PreprocessingArguments) lowerCAmelCase_ = parser.parse_args() if args.num_workers is None: lowerCAmelCase_ = multiprocessing.cpu_count() lowerCAmelCase_ = AutoTokenizer.from_pretrained(args.tokenizer_dir) # Load dataset lowerCAmelCase_ = time.time() lowerCAmelCase_ = load_dataset(args.dataset_name, split='''train''') print(F"Time to load dataset: {time.time()-t_start:.2f}") # Run preprocessing lowerCAmelCase_ = time.time() lowerCAmelCase_ = ds.map(preprocess, num_proc=args.num_workers) print(F"Time to preprocess dataset: {time.time()-t_start:.2f}") # Deduplicate hashes lowerCAmelCase_ = set(ds.unique('''hash''')) lowerCAmelCase_ = len(uniques) / len(ds) print(F"Fraction of duplicates: {1-frac:.2%}") # Deduplicate data and apply heuristics lowerCAmelCase_ = time.time() lowerCAmelCase_ = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args}) print(F"Time to filter dataset: {time.time()-t_start:.2f}") print(F"Size of filtered dataset: {len(ds_filter)}") # Deduplicate with minhash and jaccard similarity if args.near_deduplication: lowerCAmelCase_ = time.time() lowerCAmelCase_ , lowerCAmelCase_ = deduplicate_dataset(ds_filter, args.jaccard_threshold) print(F"Time to deduplicate dataset: {time.time()-t_start:.2f}") print(F"Size of deduplicate dataset: {len(ds_filter)}") # Save data in batches of samples_per_file lowerCAmelCase_ = Path(args.output_dir) output_dir.mkdir(exist_ok=True) # save duplicate_clusters in the output_dir as artifacts # not sure it is the right place the save it if args.near_deduplication: with open(output_dir / '''duplicate_clusters.json''', '''w''') as f: json.dump(duplicate_clusters, f) lowerCAmelCase_ = output_dir / '''data''' data_dir.mkdir(exist_ok=True) lowerCAmelCase_ = time.time() for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)): lowerCAmelCase_ = 
str(data_dir / F"file-{file_number+1:012}.json") lowerCAmelCase_ = min(len(ds_filter), index + args.samples_per_file) ds_filter.select(list(range(index, end_index))).to_json(file_path) compress_file(file_path) print(F"Time to save dataset: {time.time()-t_start:.2f}")
707
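The script above deduplicates exactly by whitespace-insensitive MD5 hashes before applying its heuristics: it collects the set of unique hashes, and the filter admits each hash only on first sight. A self-contained sketch of just that step:

import hashlib
import re

whitespace = re.compile(r"\s+")

def content_hash(text: str) -> str:
    # Hash with all whitespace stripped, so reformatted copies collide.
    return hashlib.md5(whitespace.sub("", text).encode("utf-8")).hexdigest()

samples = ["def f():\n    pass", "def f():  \n pass", "print(1)"]
uniques = {content_hash(s) for s in samples}  # 2 distinct hashes
kept = []
for s in samples:
    h = content_hash(s)
    if h in uniques:        # first occurrence wins, later duplicates are dropped
        uniques.remove(h)
        kept.append(s)
print(len(kept))  # 2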
"""simple docstring""" import unittest from queue import Empty from threading import Thread from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available from transformers.testing_utils import CaptureStdout, require_torch, torch_device from ..test_modeling_common import ids_tensor if is_torch_available(): import torch from transformers import AutoModelForCausalLM @require_torch class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Optional[int] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : List[str] = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Dict = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : str = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Any = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : List[Any] = -1 _SCREAMING_SNAKE_CASE : str = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : Any = tokenizer.decode(greedy_ids[0]) _SCREAMING_SNAKE_CASE : List[Any] = TextIteratorStreamer(_A) _SCREAMING_SNAKE_CASE : Any = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[Any] = Thread(target=model.generate , kwargs=_A) thread.start() _SCREAMING_SNAKE_CASE : Any = """""" for new_text in streamer: streamer_text += new_text self.assertEqual(_A , _A) def _lowerCAmelCase ( self : List[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : Dict = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Any = -1 _SCREAMING_SNAKE_CASE : Any = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : Optional[Any] = model.generate(_A , max_new_tokens=1_0 , do_sample=_A) _SCREAMING_SNAKE_CASE : str = greedy_ids[:, input_ids.shape[1] :] _SCREAMING_SNAKE_CASE : Dict = tokenizer.decode(new_greedy_ids[0]) with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Any = TextStreamer(_A , skip_prompt=_A) model.generate(_A , max_new_tokens=1_0 , do_sample=_A , streamer=_A) # The greedy text should be printed to stdout, except for the final "\n" in the streamer _SCREAMING_SNAKE_CASE : Optional[int] = cs.out[:-1] self.assertEqual(_A , _A) def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""distilgpt2""") _SCREAMING_SNAKE_CASE : Optional[Any] = 
AutoModelForCausalLM.from_pretrained("""distilgpt2""").to(_A) _SCREAMING_SNAKE_CASE : int = -1 _SCREAMING_SNAKE_CASE : List[str] = torch.ones((1, 5) , device=_A).long() * model.config.bos_token_id with CaptureStdout() as cs: _SCREAMING_SNAKE_CASE : Optional[int] = TextStreamer(_A , skip_special_tokens=_A) model.generate(_A , max_new_tokens=1 , do_sample=_A , streamer=_A) # The prompt contains a special token, so the streamer should not print it. As such, the output text, when # re-tokenized, must only contain one token _SCREAMING_SNAKE_CASE : Optional[Any] = cs.out[:-1] # Remove the final "\n" _SCREAMING_SNAKE_CASE : Tuple = tokenizer(_A , return_tensors="""pt""") self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1)) def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""") _SCREAMING_SNAKE_CASE : List[Any] = AutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""").to(_A) _SCREAMING_SNAKE_CASE : Tuple = -1 _SCREAMING_SNAKE_CASE : Dict = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(_A) _SCREAMING_SNAKE_CASE : int = TextIteratorStreamer(_A , timeout=0.001) _SCREAMING_SNAKE_CASE : List[Any] = {"""input_ids""": input_ids, """max_new_tokens""": 1_0, """do_sample""": False, """streamer""": streamer} _SCREAMING_SNAKE_CASE : List[str] = Thread(target=model.generate , kwargs=_A) thread.start() # The streamer will timeout after 0.001 seconds, so an exception will be raised with self.assertRaises(_A): _SCREAMING_SNAKE_CASE : str = """""" for new_text in streamer: streamer_text += new_text
635
0
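The tests above cover both streamer flavors; a hedged usage sketch of the iterator variant outside the test suite, reusing the same tiny test checkpoint. generate() blocks, so it runs in a worker thread while the main thread consumes text:

from threading import Thread
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer

tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")

inputs = tokenizer("An example prompt", return_tensors="pt")
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)

thread = Thread(target=model.generate, kwargs=dict(**inputs, max_new_tokens=10, streamer=streamer))
thread.start()
for chunk in streamer:           # yields decoded text as it is generated
    print(chunk, end="", flush=True)
thread.join()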
"""simple docstring""" import contextlib import faulthandler import io import multiprocessing import os import platform import signal import tempfile def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: _SCREAMING_SNAKE_CASE : str = multiprocessing.Manager() _SCREAMING_SNAKE_CASE : Optional[int] = manager.list() _SCREAMING_SNAKE_CASE : Optional[Any] = multiprocessing.Process(target=__SCREAMING_SNAKE_CASE , args=(check_program, result, timeout) ) p.start() p.join(timeout=timeout + 1 ) if p.is_alive(): p.kill() if not result: result.append("""timed out""" ) return { "task_id": task_id, "passed": result[0] == "passed", "result": result[0], "completion_id": completion_id, } def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> List[Any]: with create_tempdir(): # These system calls are needed when cleaning up tempdir. import os import shutil _SCREAMING_SNAKE_CASE : Optional[Any] = shutil.rmtree _SCREAMING_SNAKE_CASE : List[str] = os.rmdir _SCREAMING_SNAKE_CASE : List[str] = os.chdir # Disable functionalities that can make destructive changes to the test. reliability_guard() # Run program. try: _SCREAMING_SNAKE_CASE : Any = {} with swallow_io(): with time_limit(__SCREAMING_SNAKE_CASE ): exec(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) result.append("""passed""" ) except TimeoutException: result.append("""timed out""" ) except BaseException as e: result.append(F"""failed: {e}""" ) # Needed for cleaning up. _SCREAMING_SNAKE_CASE : List[str] = rmtree _SCREAMING_SNAKE_CASE : Tuple = rmdir _SCREAMING_SNAKE_CASE : Any = chdir @contextlib.contextmanager def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Union[str, Any]: def signal_handler(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TimeoutException("""Timed out!""" ) signal.setitimer(signal.ITIMER_REAL , __SCREAMING_SNAKE_CASE ) signal.signal(signal.SIGALRM , __SCREAMING_SNAKE_CASE ) try: yield finally: signal.setitimer(signal.ITIMER_REAL , 0 ) @contextlib.contextmanager def lowerCamelCase_()-> Optional[Any]: _SCREAMING_SNAKE_CASE : Optional[int] = WriteOnlyStringIO() with contextlib.redirect_stdout(__SCREAMING_SNAKE_CASE ): with contextlib.redirect_stderr(__SCREAMING_SNAKE_CASE ): with redirect_stdin(__SCREAMING_SNAKE_CASE ): yield @contextlib.contextmanager def lowerCamelCase_()-> List[Any]: with tempfile.TemporaryDirectory() as dirname: with chdir(__SCREAMING_SNAKE_CASE ): yield dirname class _snake_case ( __snake_case ): """simple docstring""" pass class _snake_case ( io.StringIO ): """simple docstring""" def _lowerCAmelCase ( self : Tuple , *_A : Optional[int] , **_A : Union[str, Any]): """simple docstring""" raise OSError def _lowerCAmelCase ( self : int , *_A : Optional[int] , **_A : Any): """simple docstring""" raise OSError def _lowerCAmelCase ( self : str , *_A : str , **_A : List[str]): """simple docstring""" raise OSError def _lowerCAmelCase ( self : Optional[Any] , *_A : Tuple , **_A : List[str]): """simple docstring""" return False class _snake_case ( contextlib._RedirectStream ): # type: ignore """simple docstring""" a = "stdin" @contextlib.contextmanager def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Optional[int]: if root == ".": yield return _SCREAMING_SNAKE_CASE : str = os.getcwd() os.chdir(__SCREAMING_SNAKE_CASE ) try: yield except BaseException as exc: raise exc finally: os.chdir(__SCREAMING_SNAKE_CASE ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE=None )-> Union[str, Any]: if maximum_memory_bytes is not 
None: import resource resource.setrlimit(resource.RLIMIT_AS , (maximum_memory_bytes, maximum_memory_bytes) ) resource.setrlimit(resource.RLIMIT_DATA , (maximum_memory_bytes, maximum_memory_bytes) ) if not platform.uname().system == "Darwin": resource.setrlimit(resource.RLIMIT_STACK , (maximum_memory_bytes, maximum_memory_bytes) ) faulthandler.disable() import builtins _SCREAMING_SNAKE_CASE : Any = None _SCREAMING_SNAKE_CASE : Optional[Any] = None import os _SCREAMING_SNAKE_CASE : str = """1""" _SCREAMING_SNAKE_CASE : List[str] = None _SCREAMING_SNAKE_CASE : str = None _SCREAMING_SNAKE_CASE : Tuple = None _SCREAMING_SNAKE_CASE : Any = None _SCREAMING_SNAKE_CASE : str = None _SCREAMING_SNAKE_CASE : Optional[Any] = None _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Optional[Any] = None _SCREAMING_SNAKE_CASE : Dict = None _SCREAMING_SNAKE_CASE : int = None _SCREAMING_SNAKE_CASE : Dict = None _SCREAMING_SNAKE_CASE : Optional[int] = None _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Tuple = None _SCREAMING_SNAKE_CASE : Tuple = None _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Any = None _SCREAMING_SNAKE_CASE : Any = None _SCREAMING_SNAKE_CASE : List[str] = None _SCREAMING_SNAKE_CASE : str = None _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Dict = None _SCREAMING_SNAKE_CASE : int = None _SCREAMING_SNAKE_CASE : int = None _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : List[str] = None import shutil _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Tuple = None _SCREAMING_SNAKE_CASE : Union[str, Any] = None import subprocess _SCREAMING_SNAKE_CASE : Tuple = None # type: ignore _SCREAMING_SNAKE_CASE : Tuple = None import sys _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Any = None _SCREAMING_SNAKE_CASE : Any = None _SCREAMING_SNAKE_CASE : List[Any] = None _SCREAMING_SNAKE_CASE : Tuple = None
708
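The sandbox above combines a SIGALRM-based timeout, swallowed I/O, and a reliability_guard that nulls out destructive builtins before exec-ing untrusted code. A standalone sketch of the timeout piece alone, assuming a POSIX system (SIGALRM does not exist on Windows):

import contextlib
import signal

class TimeoutException(Exception):
    pass

@contextlib.contextmanager
def time_limit(seconds: float):
    def handler(signum, frame):
        raise TimeoutException("Timed out!")
    signal.signal(signal.SIGALRM, handler)
    signal.setitimer(signal.ITIMER_REAL, seconds)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)  # always cancel the timer

try:
    with time_limit(0.1):
        while True:  # runaway "candidate program"
            pass
except TimeoutException:
    print("killed after 0.1s")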
"""simple docstring""" # Copyright 2023 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer from .base import PipelineTool class _snake_case ( __snake_case ): """simple docstring""" a = "facebook/bart-large-mnli" a = ( "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which " "should be the text to classify, and `labels`, which should be the list of labels to use for classification. " "It returns the most likely label in the list of provided `labels` for the input text." ) a = "text_classifier" a = AutoTokenizer a = AutoModelForSequenceClassification a = ["text", ["text"]] a = ["text"] def _lowerCAmelCase ( self : int): """simple docstring""" super().setup() _SCREAMING_SNAKE_CASE : Any = self.model.config _SCREAMING_SNAKE_CASE : Any = -1 for idx, label in config.idalabel.items(): if label.lower().startswith("""entail"""): _SCREAMING_SNAKE_CASE : List[Any] = int(_A) if self.entailment_id == -1: raise ValueError("""Could not determine the entailment ID from the model config, please pass it at init.""") def _lowerCAmelCase ( self : Optional[Any] , _A : Tuple , _A : List[str]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = labels return self.pre_processor( [text] * len(_A) , [f"""This example is {label}""" for label in labels] , return_tensors="""pt""" , padding="""max_length""" , ) def _lowerCAmelCase ( self : Tuple , _A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : str = outputs.logits _SCREAMING_SNAKE_CASE : List[Any] = torch.argmax(logits[:, 2]).item() return self._labels[label_id]
635
0
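The tool above scores each candidate label by phrasing it as an NLI hypothesis ("This example is {label}") against the MNLI-tuned BART and picking the winning entailment logit. The same idea is exposed directly by the zero-shot classification pipeline; a sketch with the same checkpoint:

from transformers import pipeline

classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli")
result = classifier(
    "The new battery lasts all day on a single charge.",
    candidate_labels=["positive", "negative"],
)
print(result["labels"][0])  # most likely label, as the tool's decode() returns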
"""simple docstring""" import json import os import subprocess import unittest from ast import literal_eval import pytest from parameterized import parameterized, parameterized_class from . import is_sagemaker_available if is_sagemaker_available(): from sagemaker import Session, TrainingJobAnalytics from sagemaker.huggingface import HuggingFace @pytest.mark.skipif( literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , ) @pytest.mark.usefixtures("sm_env" ) @parameterized_class( [ { "framework": "pytorch", "script": "run_glue_model_parallelism.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, { "framework": "pytorch", "script": "run_glue.py", "model_name_or_path": "roberta-large", "instance_type": "ml.p3dn.24xlarge", "results": {"train_runtime": 16_00, "eval_accuracy": 0.3, "eval_loss": 1.2}, }, ] ) class _snake_case ( unittest.TestCase ): """simple docstring""" def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" if self.framework == "pytorch": subprocess.run( f"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding="""utf-8""" , check=_A , ) assert hasattr(self , """env""") def _lowerCAmelCase ( self : Optional[Any] , _A : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = { """enabled""": True, """processes_per_host""": 8, } _SCREAMING_SNAKE_CASE : Dict = { """enabled""": True, """parameters""": { """microbatches""": 4, """placement_strategy""": """spread""", """pipeline""": """interleaved""", """optimize""": """speed""", """partitions""": 4, """ddp""": True, }, } _SCREAMING_SNAKE_CASE : Optional[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options} _SCREAMING_SNAKE_CASE : Any = """trainer""" if self.script == """run_glue.py""" else """smtrainer""" # creates estimator return HuggingFace( entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=_A , instance_type=self.instance_type , debugger_hook_config=_A , hyperparameters={ **self.env.hyperparameters, """model_name_or_path""": self.model_name_or_path, """max_steps""": 5_0_0, } , metric_definitions=self.env.metric_definitions , distribution=_A , py_version="""py36""" , ) def _lowerCAmelCase ( self : Dict , _A : List[Any]): """simple docstring""" TrainingJobAnalytics(_A).export_csv(f"""{self.env.test_path}/{job_name}_metrics.csv""") @parameterized.expand([(1,)]) def _lowerCAmelCase ( self : Tuple , _A : Union[str, Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : int = self.create_estimator(_A) # run training estimator.fit() # result dataframe _SCREAMING_SNAKE_CASE : Union[str, Any] = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe() # extract kpis _SCREAMING_SNAKE_CASE : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""]) _SCREAMING_SNAKE_CASE : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""]) # get train time from SageMaker job, this includes starting, preprocessing, stopping _SCREAMING_SNAKE_CASE : Optional[Any] = ( Session().describe_training_job(estimator.latest_training_job.name).get("""TrainingTimeInSeconds""" , 
9_9_9_9_9_9) ) # assert kpis assert train_runtime <= self.results["train_runtime"] assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy) assert all(t <= self.results["""eval_loss"""] for t in eval_loss) # dump tests result into json file to share in PR with open(f"""{estimator.latest_training_job.name}.json""" , """w""") as outfile: json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , _A)
709
"""simple docstring""" import unittest from transformers import is_torch_available, is_vision_available from transformers.testing_utils import require_torch, require_vision, slow, torch_device if is_torch_available(): import torch from transformers import AutoModelForImageClassification if is_vision_available(): from transformers import AutoImageProcessor @require_torch @require_vision class _snake_case ( unittest.TestCase ): """simple docstring""" @slow def _lowerCAmelCase ( self : str): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = AutoImageProcessor.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") _SCREAMING_SNAKE_CASE : str = AutoModelForImageClassification.from_pretrained("""microsoft/dit-base-finetuned-rvlcdip""") model.to(_A) from datasets import load_dataset _SCREAMING_SNAKE_CASE : Any = load_dataset("""nielsr/rvlcdip-demo""") _SCREAMING_SNAKE_CASE : Any = dataset["""train"""][0]["""image"""].convert("""RGB""") _SCREAMING_SNAKE_CASE : str = image_processor(_A , return_tensors="""pt""").to(_A) # forward pass with torch.no_grad(): _SCREAMING_SNAKE_CASE : Any = model(**_A) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE : List[str] = torch.Size((1, 1_6)) self.assertEqual(logits.shape , _A) _SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor( [-0.4_158, -0.4_092, -0.4_347] , device=_A , dtype=torch.float , ) self.assertTrue(torch.allclose(logits[0, :3] , _A , atol=1e-4))
635
0
"""simple docstring""" from __future__ import annotations lowerCAmelCase_ = list[list[int]] # assigning initial values to the grid lowerCAmelCase_ = [ [3, 0, 6, 5, 0, 8, 4, 0, 0], [5, 2, 0, 0, 0, 0, 0, 0, 0], [0, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] # a grid with no solution lowerCAmelCase_ = [ [5, 0, 6, 5, 0, 8, 4, 0, 3], [5, 2, 0, 0, 0, 0, 0, 0, 2], [1, 8, 7, 0, 0, 0, 0, 3, 1], [0, 0, 3, 0, 1, 0, 0, 8, 0], [9, 0, 0, 8, 6, 3, 0, 0, 5], [0, 5, 0, 0, 9, 0, 6, 0, 0], [1, 3, 0, 0, 0, 0, 2, 5, 0], [0, 0, 0, 0, 0, 0, 0, 7, 4], [0, 0, 5, 2, 0, 6, 3, 0, 0], ] def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> bool: for i in range(9 ): if grid[row][i] == n or grid[i][column] == n: return False for i in range(3 ): for j in range(3 ): if grid[(row - row % 3) + i][(column - column % 3) + j] == n: return False return True def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> tuple[int, int] | None: for i in range(9 ): for j in range(9 ): if grid[i][j] == 0: return i, j return None def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> Matrix | None: if location := find_empty_location(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = location else: # If the location is ``None``, then the grid is solved. return grid for digit in range(1 , 10 ): if is_safe(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Optional[int] = digit if sudoku(__SCREAMING_SNAKE_CASE ) is not None: return grid _SCREAMING_SNAKE_CASE : Union[str, Any] = 0 return None def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> None: for row in grid: for cell in row: print(__SCREAMING_SNAKE_CASE , end=""" """ ) print() if __name__ == "__main__": # make a copy of grid so that you can compare with the unmodified grid for example_grid in (initial_grid, no_solution): print('''\nExample grid:\n''' + '''=''' * 20) print_solution(example_grid) print('''\nExample grid solution:''') lowerCAmelCase_ = sudoku(example_grid) if solution is not None: print_solution(solution) else: print('''Cannot find a solution.''')
710
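A small companion sketch for the solver above: validating a completed grid by checking that every row, column and 3x3 box is a permutation of 1..9. It is independent of the backtracking itself, so it can sanity-check any claimed solution:

def is_valid_solution(grid: list[list[int]]) -> bool:
    expected = set(range(1, 10))
    rows = all(set(row) == expected for row in grid)
    cols = all({grid[r][c] for r in range(9)} == expected for c in range(9))
    boxes = all(
        {grid[3 * br + i][3 * bc + j] for i in range(3) for j in range(3)} == expected
        for br in range(3)
        for bc in range(3)
    )
    return rows and cols and boxes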
"""simple docstring""" import torch from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel class _snake_case ( __snake_case ): """simple docstring""" a = "M-CLIP" def __init__( self : Optional[Any] , _A : List[str]=1_0_2_4 , _A : Union[str, Any]=7_6_8 , **_A : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Tuple = transformerDimSize _SCREAMING_SNAKE_CASE : List[str] = imageDimSize super().__init__(**_A) class _snake_case ( __snake_case ): """simple docstring""" a = MCLIPConfig def __init__( self : Dict , _A : Optional[Any] , *_A : Any , **_A : Dict): """simple docstring""" super().__init__(_A , *_A , **_A) _SCREAMING_SNAKE_CASE : Tuple = XLMRobertaModel(_A) _SCREAMING_SNAKE_CASE : List[Any] = torch.nn.Linear( in_features=config.transformerDimensions , out_features=config.numDims) def _lowerCAmelCase ( self : Union[str, Any] , _A : str , _A : int): """simple docstring""" _SCREAMING_SNAKE_CASE : str = self.transformer(input_ids=_A , attention_mask=_A)[0] _SCREAMING_SNAKE_CASE : Optional[Any] = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None] return self.LinearTransformation(_A), embs
635
0
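The forward pass above mean-pools the transformer's last hidden state over non-padding positions before the linear projection. The pooling step in isolation, with dummy tensors so the arithmetic is visible:

import torch

hidden = torch.randn(2, 5, 768)                 # (batch, seq, dim) hidden states
mask = torch.tensor([[1, 1, 1, 0, 0],
                     [1, 1, 1, 1, 1]])          # attention mask, 0 = padding
pooled = (hidden * mask.unsqueeze(2)).sum(dim=1) / mask.sum(dim=1)[:, None]
print(pooled.shape)  # (2, 768): one embedding per sequence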
from typing import List, Optional, Union

import torch
from transformers import (
    XLMRobertaTokenizer,
)

from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
    is_accelerate_available,
    is_accelerate_version,
    logging,
    randn_tensor,
    replace_example_docstring,
)
from .text_encoder import MultilingualCLIP


lowerCAmelCase_ = logging.get_logger(__name__)  # pylint: disable=invalid-name

lowerCAmelCase_ = '''
    Examples:
        ```py
        >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
        >>> import torch

        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
        >>> pipe_prior.to("cuda")

        >>> prompt = "red cat, 4k photo"
        >>> out = pipe_prior(prompt)
        >>> image_emb = out.image_embeds
        >>> negative_image_emb = out.negative_image_embeds

        >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
        >>> pipe.to("cuda")

        >>> image = pipe(
        ...     prompt,
        ...     image_embeds=image_emb,
        ...     negative_image_embeds=negative_image_emb,
        ...     height=768,
        ...     width=768,
        ...     num_inference_steps=100,
        ... ).images

        >>> image[0].save("cat.png")
        ```
'''


def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=8 )-> Union[str, Any]:
    # Round both spatial dimensions up to the next multiple of scale_factor**2,
    # then map them back to the latent resolution.
    _SCREAMING_SNAKE_CASE : int = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    _SCREAMING_SNAKE_CASE : Optional[int] = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor


class _snake_case ( __snake_case ):
    """simple docstring"""

    def __init__( self : int , _A : MultilingualCLIP , _A : XLMRobertaTokenizer , _A : UNetaDConditionModel , _A : Union[DDIMScheduler, DDPMScheduler] , _A : VQModel , ):
        """simple docstring"""
        super().__init__()
        self.register_modules(
            text_encoder=_A , tokenizer=_A , unet=_A , scheduler=_A , movq=_A , )
        _SCREAMING_SNAKE_CASE : Dict = 2 ** (len(self.movq.config.block_out_channels) - 1)

    def _lowerCAmelCase ( self : Optional[Any] , _A : Tuple , _A : Optional[Any] , _A : Optional[Any] , _A : Optional[int] , _A : Tuple , _A : Tuple):
        """simple docstring"""
        if latents is None:
            _SCREAMING_SNAKE_CASE : List[Any] = randn_tensor(_A , generator=_A , device=_A , dtype=_A)
        else:
            if latents.shape != shape:
                raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""")
            _SCREAMING_SNAKE_CASE : Union[str, Any] = latents.to(_A)
        _SCREAMING_SNAKE_CASE : int = latents * scheduler.init_noise_sigma
        return latents

    def _lowerCAmelCase ( self : Optional[int] , _A : str , _A : List[Any] , _A : Tuple , _A : Optional[int] , _A : Any=None , ):
        """simple docstring"""
        _SCREAMING_SNAKE_CASE : Union[str, Any] = len(_A) if isinstance(_A , _A) else 1
        # get prompt text embeddings
        _SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer(
            _A , padding="""max_length""" , truncation=_A , max_length=7_7 , return_attention_mask=_A , add_special_tokens=_A , return_tensors="""pt""" , )
        _SCREAMING_SNAKE_CASE : List[str] = text_inputs.input_ids
        _SCREAMING_SNAKE_CASE : List[Any] = self.tokenizer(_A , padding="""longest""" , return_tensors="""pt""").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_A , _A):
            _SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                """The following part of your input was truncated because CLIP can only handle sequences up to"""
                f""" {self.tokenizer.model_max_length} tokens: {removed_text}""")
        _SCREAMING_SNAKE_CASE : Optional[Any] = text_input_ids.to(_A)
        _SCREAMING_SNAKE_CASE : Union[str, Any] = text_inputs.attention_mask.to(_A)
        _SCREAMING_SNAKE_CASE : Dict = self.text_encoder(
            input_ids=_A , attention_mask=_A)
        _SCREAMING_SNAKE_CASE : Optional[Any] = prompt_embeds.repeat_interleave(_A , dim=0)
        _SCREAMING_SNAKE_CASE : Tuple = text_encoder_hidden_states.repeat_interleave(_A , dim=0)
        _SCREAMING_SNAKE_CASE : Optional[Any] = text_mask.repeat_interleave(_A , dim=0)

        if do_classifier_free_guidance:
            _SCREAMING_SNAKE_CASE : List[str]
            if negative_prompt is None:
                _SCREAMING_SNAKE_CASE : Dict = [""""""] * batch_size
            elif type(_A) is not type(_A):
                raise TypeError(
                    f"""`negative_prompt` should be the same type as `prompt`, but got {type(_A)} !="""
                    f""" {type(_A)}.""")
            elif isinstance(_A , _A):
                _SCREAMING_SNAKE_CASE : Optional[int] = [negative_prompt]
            elif batch_size != len(_A):
                raise ValueError(
                    f"""`negative_prompt`: {negative_prompt} has batch size {len(_A)}, but `prompt`:"""
                    f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
                    """ the batch size of `prompt`.""")
            else:
                _SCREAMING_SNAKE_CASE : int = negative_prompt
            _SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer(
                _A , padding="""max_length""" , max_length=7_7 , truncation=_A , return_attention_mask=_A , add_special_tokens=_A , return_tensors="""pt""" , )
            _SCREAMING_SNAKE_CASE : List[Any] = uncond_input.input_ids.to(_A)
            _SCREAMING_SNAKE_CASE : List[str] = uncond_input.attention_mask.to(_A)
            _SCREAMING_SNAKE_CASE : Optional[int] = self.text_encoder(
                input_ids=_A , attention_mask=_A)

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            _SCREAMING_SNAKE_CASE : Optional[int] = negative_prompt_embeds.shape[1]
            _SCREAMING_SNAKE_CASE : List[str] = negative_prompt_embeds.repeat(1 , _A)
            _SCREAMING_SNAKE_CASE : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _A)
            _SCREAMING_SNAKE_CASE : Any = uncond_text_encoder_hidden_states.shape[1]
            _SCREAMING_SNAKE_CASE : Optional[int] = uncond_text_encoder_hidden_states.repeat(1 , _A , 1)
            _SCREAMING_SNAKE_CASE : List[str] = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt , _A , -1)
            _SCREAMING_SNAKE_CASE : Dict = uncond_text_mask.repeat_interleave(_A , dim=0)
            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            _SCREAMING_SNAKE_CASE : str = torch.cat([negative_prompt_embeds, prompt_embeds])
            _SCREAMING_SNAKE_CASE : List[str] = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            _SCREAMING_SNAKE_CASE : str = torch.cat([uncond_text_mask, text_mask])
        return prompt_embeds, text_encoder_hidden_states, text_mask

    def _lowerCAmelCase ( self : Dict , _A : Union[str, Any]=0):
        """simple docstring"""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("""Please install accelerate via `pip install accelerate`""")
        _SCREAMING_SNAKE_CASE : Dict = torch.device(f"""cuda:{gpu_id}""")
        _SCREAMING_SNAKE_CASE : Tuple = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(_A , _A)

    def _lowerCAmelCase ( self : List[Any] , _A : Optional[int]=0):
        """simple docstring"""
        if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0"""):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""")
        _SCREAMING_SNAKE_CASE : Optional[int] = torch.device(f"""cuda:{gpu_id}""")
        if self.device.type != "cpu":
            self.to("""cpu""" , silence_dtype_warnings=_A)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        _SCREAMING_SNAKE_CASE : Any = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _SCREAMING_SNAKE_CASE : List[Any] = cpu_offload_with_hook(_A , _A , prev_module_hook=_A)
        # this pipeline registers no safety checker, so guard the attribute lookup
        if getattr(self , """safety_checker""" , None) is not None:
            _SCREAMING_SNAKE_CASE : Dict = cpu_offload_with_hook(self.safety_checker , _A , prev_module_hook=_A)
        # We'll offload the last model manually.
        _SCREAMING_SNAKE_CASE : List[str] = hook

    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _lowerCAmelCase ( self : List[Any]):
        """simple docstring"""
        if not hasattr(self.unet , """_hf_hook"""):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(_A , """_hf_hook""")
                and hasattr(module._hf_hook , """execution_device""")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device

    @torch.no_grad()
    @replace_example_docstring(lowerCAmelCase_)
    def __call__( self : Optional[int] , _A : Union[str, List[str]] , _A : Union[torch.FloatTensor, List[torch.FloatTensor]] , _A : Union[torch.FloatTensor, List[torch.FloatTensor]] , _A : Optional[Union[str, List[str]]] = None , _A : int = 5_1_2 , _A : int = 5_1_2 , _A : int = 1_0_0 , _A : float = 4.0 , _A : int = 1 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : Optional[torch.FloatTensor] = None , _A : Optional[str] = "pil" , _A : bool = True , ):
        """simple docstring"""
        if isinstance(_A , _A):
            _SCREAMING_SNAKE_CASE : List[Any] = 1
        elif isinstance(_A , _A):
            _SCREAMING_SNAKE_CASE : Any = len(_A)
        else:
            raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(_A)}""")
        _SCREAMING_SNAKE_CASE : Tuple = self._execution_device
        _SCREAMING_SNAKE_CASE : Any = batch_size * num_images_per_prompt
        _SCREAMING_SNAKE_CASE : Optional[int] = guidance_scale > 1.0
        _SCREAMING_SNAKE_CASE : List[str] = self._encode_prompt(
            _A , _A , _A , _A , _A)
        if isinstance(_A , _A):
            _SCREAMING_SNAKE_CASE : str = torch.cat(_A , dim=0)
        if isinstance(_A , _A):
            _SCREAMING_SNAKE_CASE : Tuple = torch.cat(_A , dim=0)
        if do_classifier_free_guidance:
            _SCREAMING_SNAKE_CASE : str = image_embeds.repeat_interleave(_A , dim=0)
            _SCREAMING_SNAKE_CASE : Optional[int] = negative_image_embeds.repeat_interleave(_A , dim=0)
            _SCREAMING_SNAKE_CASE : List[str] = torch.cat([negative_image_embeds, image_embeds] , dim=0).to(
                dtype=prompt_embeds.dtype , device=_A)

        self.scheduler.set_timesteps(_A , device=_A)
        _SCREAMING_SNAKE_CASE : Any = self.scheduler.timesteps
        _SCREAMING_SNAKE_CASE : Optional[Any] = self.unet.config.in_channels
        _SCREAMING_SNAKE_CASE : Dict = get_new_h_w(_A , _A , self.movq_scale_factor)

        # create initial latent
        _SCREAMING_SNAKE_CASE : List[Any] = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _A , _A , _A , self.scheduler , )

        for i, t in enumerate(self.progress_bar(_A)):
            # expand the latents if we are doing classifier free guidance
            _SCREAMING_SNAKE_CASE : Optional[Any] = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            _SCREAMING_SNAKE_CASE : Union[str, Any] = {"""text_embeds""": prompt_embeds, """image_embeds""": image_embeds}
            _SCREAMING_SNAKE_CASE : List[Any] = self.unet(
                sample=_A , timestep=_A , encoder_hidden_states=_A , added_cond_kwargs=_A , return_dict=_A , )[0]
            if do_classifier_free_guidance:
                _SCREAMING_SNAKE_CASE : List[str] = noise_pred.split(latents.shape[1] , dim=1)
                _SCREAMING_SNAKE_CASE : int = noise_pred.chunk(2)
                _SCREAMING_SNAKE_CASE : int = variance_pred.chunk(2)
                _SCREAMING_SNAKE_CASE : List[str] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                _SCREAMING_SNAKE_CASE : List[str] = torch.cat([noise_pred, variance_pred_text] , dim=1)
            if not (
                hasattr(self.scheduler.config , """variance_type""")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                _SCREAMING_SNAKE_CASE : Dict = noise_pred.split(latents.shape[1] , dim=1)
            # compute the previous noisy sample x_t -> x_t-1
            _SCREAMING_SNAKE_CASE : Any = self.scheduler.step(
                _A , _A , _A , generator=_A , ).prev_sample

        # post-processing
        _SCREAMING_SNAKE_CASE : Optional[Any] = self.movq.decode(_A , force_not_quantize=_A)["""sample"""]
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"""Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}""")
        if output_type in ["np", "pil"]:
            _SCREAMING_SNAKE_CASE : str = image * 0.5 + 0.5
            _SCREAMING_SNAKE_CASE : Union[str, Any] = image.clamp(0 , 1)
            _SCREAMING_SNAKE_CASE : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
        if output_type == "pil":
            _SCREAMING_SNAKE_CASE : List[str] = self.numpy_to_pil(_A)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=_A)
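# A minimal sketch (separate from the pipeline above) of the height/width
# rounding helper defined at the top of this file: dimensions are rounded up
# to the next multiple of scale_factor**2, then mapped to the latent grid.
# The `_demo_` name is illustrative, not part of the file above.
def _demo_new_h_w(h: int, w: int, scale_factor: int = 8) -> tuple:
    new_h = h // scale_factor**2 + (1 if h % scale_factor**2 else 0)
    new_w = w // scale_factor**2 + (1 if w % scale_factor**2 else 0)
    return new_h * scale_factor, new_w * scale_factor

assert _demo_new_h_w(768, 768) == (96, 96)   # 768 is already a multiple of 64
assert _demo_new_h_w(769, 768) == (104, 96)  # 769 rounds up to 13 * 64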
711
"""simple docstring""" from decimal import Decimal, getcontext from math import ceil, factorial def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> str: if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ): raise TypeError("""Undefined for non-integers""" ) elif precision < 1: raise ValueError("""Undefined for non-natural numbers""" ) _SCREAMING_SNAKE_CASE : int = precision _SCREAMING_SNAKE_CASE : Dict = ceil(precision / 14 ) _SCREAMING_SNAKE_CASE : int = 426_880 * Decimal(10_005 ).sqrt() _SCREAMING_SNAKE_CASE : Union[str, Any] = 1 _SCREAMING_SNAKE_CASE : str = 13_591_409 _SCREAMING_SNAKE_CASE : Tuple = Decimal(__SCREAMING_SNAKE_CASE ) for k in range(1 , __SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE : Tuple = factorial(6 * k ) // (factorial(3 * k ) * factorial(__SCREAMING_SNAKE_CASE ) ** 3) linear_term += 545_140_134 exponential_term *= -262_537_412_640_768_000 partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term return str(constant_term / partial_sum )[:-1] if __name__ == "__main__": lowerCAmelCase_ = 50 print(F"The first {n} digits of pi is: {pi(n)}")
635
0
"""simple docstring""" import unittest from transformers import load_tool from transformers.utils import is_torch_available if is_torch_available(): import torch from transformers.testing_utils import require_torch from .test_tools_common import ToolTesterMixin @require_torch class _snake_case ( unittest.TestCase , __snake_case ): """simple docstring""" def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : List[Any] = load_tool("""text-to-speech""") self.tool.setup() def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" torch.manual_seed(0) _SCREAMING_SNAKE_CASE : Any = self.tool("""hey""") _SCREAMING_SNAKE_CASE : int = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , )) def _lowerCAmelCase ( self : Union[str, Any]): """simple docstring""" torch.manual_seed(0) _SCREAMING_SNAKE_CASE : List[Any] = self.tool("""hey""") _SCREAMING_SNAKE_CASE : str = result.to_raw() self.assertTrue( torch.allclose( resulting_tensor[:3] , torch.tensor([-0.0_005_966_668_832_115_829, -0.0_003_657_640_190_795_064, -0.00_013_439_502_799_883_485]) , ))
712
"""simple docstring""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Optional[int]: # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file _SCREAMING_SNAKE_CASE : Optional[int] = TapasConfig.from_json_file(__SCREAMING_SNAKE_CASE ) # set absolute/relative position embeddings parameter _SCREAMING_SNAKE_CASE : Dict = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": _SCREAMING_SNAKE_CASE : str = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WTQ": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : Optional[int] = 4 _SCREAMING_SNAKE_CASE : Any = True # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 0.66_46_94 _SCREAMING_SNAKE_CASE : str = 0.20_79_51 _SCREAMING_SNAKE_CASE : str = 0.12_11_94 _SCREAMING_SNAKE_CASE : List[Any] = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Union[str, Any] = False _SCREAMING_SNAKE_CASE : Optional[Any] = 0.0_35_25_13 _SCREAMING_SNAKE_CASE : Optional[Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams _SCREAMING_SNAKE_CASE : int = 4 _SCREAMING_SNAKE_CASE : Tuple = False # hparam_utils.py hparams _SCREAMING_SNAKE_CASE : Any = 36.45_19 _SCREAMING_SNAKE_CASE : Union[str, Any] = 0.90_34_21 _SCREAMING_SNAKE_CASE : Optional[Any] = 2_22.0_88 _SCREAMING_SNAKE_CASE : Any = True _SCREAMING_SNAKE_CASE : str = True _SCREAMING_SNAKE_CASE : Optional[int] = True _SCREAMING_SNAKE_CASE : Dict = 0.76_31_41 _SCREAMING_SNAKE_CASE : Union[str, Any] = TapasForQuestionAnswering(config=__SCREAMING_SNAKE_CASE ) elif task == "TABFACT": _SCREAMING_SNAKE_CASE : int = TapasForSequenceClassification(config=__SCREAMING_SNAKE_CASE ) elif task == "MLM": _SCREAMING_SNAKE_CASE : int = TapasForMaskedLM(config=__SCREAMING_SNAKE_CASE ) elif task == "INTERMEDIATE_PRETRAINING": _SCREAMING_SNAKE_CASE : int = TapasModel(config=__SCREAMING_SNAKE_CASE ) else: raise ValueError(F"""Task {task} not supported.""" ) print(F"""Building PyTorch model from configuration: {config}""" ) # Load weights from tf checkpoint load_tf_weights_in_tapas(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Save pytorch-model (weights and configuration) print(F"""Save PyTorch model to {pytorch_dump_path}""" ) model.save_pretrained(__SCREAMING_SNAKE_CASE ) # Save tokenizer files print(F"""Save tokenizer files to {pytorch_dump_path}""" ) _SCREAMING_SNAKE_CASE : str = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + """vocab.txt""" , model_max_length=512 ) tokenizer.save_pretrained(__SCREAMING_SNAKE_CASE ) print("""Used relative position embeddings:""" , model.config.reset_position_index_per_cell ) if __name__ == "__main__": lowerCAmelCase_ = argparse.ArgumentParser() # Required parameters parser.add_argument( '''--task''', default='''SQA''', type=str, help='''Model task for which to convert a checkpoint. 
Defaults to SQA.''' ) parser.add_argument( '''--reset_position_index_per_cell''', default=False, action='''store_true''', help='''Whether to use relative position embeddings or not. Defaults to True.''', ) parser.add_argument( '''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.''' ) parser.add_argument( '''--tapas_config_file''', default=None, type=str, required=True, help=( '''The config json file corresponding to the pre-trained TAPAS model. \n''' '''This specifies the model architecture.''' ), ) parser.add_argument( '''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.''' ) lowerCAmelCase_ = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.task, args.reset_position_index_per_cell, args.tf_checkpoint_path, args.tapas_config_file, args.pytorch_dump_path, )
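# Hedged usage sketch for the converter above; the flag names come straight
# from the argparse definitions in this file, while the script name and all
# paths below are placeholders.
#
#   python convert_tapas_checkpoint.py \
#       --task WTQ \
#       --reset_position_index_per_cell \
#       --tf_checkpoint_path /path/to/model.ckpt \
#       --tapas_config_file /path/to/tapas_config.json \
#       --pytorch_dump_path /path/to/output_dir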
635
0
import math
from typing import Optional

import numpy as np

from ...configuration_utils import PretrainedConfig
from ...utils import logging


lowerCAmelCase_ = logging.get_logger(__name__)

lowerCAmelCase_ = {
    '''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
    '''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}


class _snake_case ( __snake_case ):
    """simple docstring"""

    a = "encodec"

    def __init__( self : Union[str, Any] , _A : int=[1.5, 3.0, 6.0, 12.0, 24.0] , _A : Any=2_4_0_0_0 , _A : Dict=1 , _A : Union[str, Any]=False , _A : Optional[Any]=None , _A : Optional[int]=None , _A : List[Any]=1_2_8 , _A : Union[str, Any]=3_2 , _A : Tuple=1 , _A : int=[8, 5, 4, 2] , _A : List[Any]="weight_norm" , _A : Any=7 , _A : Any=7 , _A : Any=3 , _A : Dict=2 , _A : List[str]=True , _A : int="reflect" , _A : str=2 , _A : Union[str, Any]=2 , _A : Dict=1.0 , _A : List[str]=1_0_2_4 , _A : List[Any]=None , _A : Any=True , **_A : Union[str, Any] , ):
        """simple docstring"""
        _SCREAMING_SNAKE_CASE : Optional[int] = target_bandwidths
        _SCREAMING_SNAKE_CASE : Dict = sampling_rate
        _SCREAMING_SNAKE_CASE : Tuple = audio_channels
        _SCREAMING_SNAKE_CASE : Any = normalize
        _SCREAMING_SNAKE_CASE : int = chunk_length_s
        _SCREAMING_SNAKE_CASE : List[str] = overlap
        _SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
        _SCREAMING_SNAKE_CASE : List[Any] = num_filters
        _SCREAMING_SNAKE_CASE : str = num_residual_layers
        _SCREAMING_SNAKE_CASE : Union[str, Any] = upsampling_ratios
        _SCREAMING_SNAKE_CASE : int = norm_type
        _SCREAMING_SNAKE_CASE : List[Any] = kernel_size
        _SCREAMING_SNAKE_CASE : List[Any] = last_kernel_size
        _SCREAMING_SNAKE_CASE : Optional[int] = residual_kernel_size
        _SCREAMING_SNAKE_CASE : List[str] = dilation_growth_rate
        _SCREAMING_SNAKE_CASE : List[str] = use_causal_conv
        _SCREAMING_SNAKE_CASE : int = pad_mode
        _SCREAMING_SNAKE_CASE : List[Any] = compress
        _SCREAMING_SNAKE_CASE : List[Any] = num_lstm_layers
        _SCREAMING_SNAKE_CASE : List[str] = trim_right_ratio
        _SCREAMING_SNAKE_CASE : Union[str, Any] = codebook_size
        _SCREAMING_SNAKE_CASE : str = codebook_dim if codebook_dim is not None else hidden_size
        _SCREAMING_SNAKE_CASE : Any = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`, got {self.norm_type}""")

        super().__init__(**_A)

    @property
    def _lowerCAmelCase ( self : List[Any]):
        """simple docstring"""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def _lowerCAmelCase ( self : Optional[int]):
        """simple docstring"""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1 , int((1.0 - self.overlap) * self.chunk_length))

    @property
    def _lowerCAmelCase ( self : Union[str, Any]):
        """simple docstring"""
        _SCREAMING_SNAKE_CASE : str = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def _lowerCAmelCase ( self : Union[str, Any]):
        """simple docstring"""
        return int(1_0_0_0 * self.target_bandwidths[-1] // (self.frame_rate * 1_0))
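# Illustrative arithmetic for the derived frame-rate property above, assuming
# the defaults from __init__ (24 kHz sampling rate, upsampling ratios
# [8, 5, 4, 2]): the hop length is the product of the ratios, and the frame
# rate is ceil(sampling_rate / hop_length).
import math

hop_length = 8 * 5 * 4 * 2  # np.prod(upsampling_ratios) == 320
frame_rate = math.ceil(24_000 / hop_length)
assert frame_rate == 75  # 75 latent frames per second of audio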
713
"""simple docstring""" from typing import Any import numpy as np def lowerCamelCase_(__SCREAMING_SNAKE_CASE )-> bool: return np.array_equal(__SCREAMING_SNAKE_CASE , matrix.conjugate().T ) def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Any: _SCREAMING_SNAKE_CASE : Optional[int] = v.conjugate().T _SCREAMING_SNAKE_CASE : Optional[int] = v_star.dot(__SCREAMING_SNAKE_CASE ) assert isinstance(__SCREAMING_SNAKE_CASE , np.ndarray ) return (v_star_dot.dot(__SCREAMING_SNAKE_CASE )) / (v_star.dot(__SCREAMING_SNAKE_CASE )) def lowerCamelCase_()-> None: _SCREAMING_SNAKE_CASE : Optional[Any] = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]] ) _SCREAMING_SNAKE_CASE : int = np.array([[1], [2], [3]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" print(rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) _SCREAMING_SNAKE_CASE : int = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]] ) assert is_hermitian(__SCREAMING_SNAKE_CASE ), F"""{a} is not hermitian.""" assert rayleigh_quotient(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) == float(3 ) if __name__ == "__main__": import doctest doctest.testmod() tests()
635
0
"""simple docstring""" import inspect from typing import List, Optional, Tuple, Union import torch from ...models import UNetaDModel, VQModel from ...schedulers import DDIMScheduler from ...utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput class _snake_case ( __snake_case ): """simple docstring""" def __init__( self : Any , _A : VQModel , _A : UNetaDModel , _A : DDIMScheduler): """simple docstring""" super().__init__() self.register_modules(vqvae=_A , unet=_A , scheduler=_A) @torch.no_grad() def __call__( self : List[str] , _A : int = 1 , _A : Optional[Union[torch.Generator, List[torch.Generator]]] = None , _A : float = 0.0 , _A : int = 5_0 , _A : Optional[str] = "pil" , _A : bool = True , **_A : List[str] , ): """simple docstring""" _SCREAMING_SNAKE_CASE = randn_tensor( (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size) , generator=_A , ) _SCREAMING_SNAKE_CASE = latents.to(self.device) # scale the initial noise by the standard deviation required by the scheduler _SCREAMING_SNAKE_CASE = latents * self.scheduler.init_noise_sigma self.scheduler.set_timesteps(_A) # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature _SCREAMING_SNAKE_CASE = """eta""" in set(inspect.signature(self.scheduler.step).parameters.keys()) _SCREAMING_SNAKE_CASE = {} if accepts_eta: _SCREAMING_SNAKE_CASE = eta for t in self.progress_bar(self.scheduler.timesteps): _SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(_A , _A) # predict the noise residual _SCREAMING_SNAKE_CASE = self.unet(_A , _A).sample # compute the previous noisy sample x_t -> x_t-1 _SCREAMING_SNAKE_CASE = self.scheduler.step(_A , _A , _A , **_A).prev_sample # decode the image latents with the VAE _SCREAMING_SNAKE_CASE = self.vqvae.decode(_A).sample _SCREAMING_SNAKE_CASE = (image / 2 + 0.5).clamp(0 , 1) _SCREAMING_SNAKE_CASE = image.cpu().permute(0 , 2 , 3 , 1).numpy() if output_type == "pil": _SCREAMING_SNAKE_CASE = self.numpy_to_pil(_A) if not return_dict: return (image,) return ImagePipelineOutput(images=_A)
714
"""simple docstring""" from __future__ import annotations def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )-> tuple: if (electron_conc, hole_conc, intrinsic_conc).count(0 ) != 1: raise ValueError("""You cannot supply more or less than 2 values""" ) elif electron_conc < 0: raise ValueError("""Electron concentration cannot be negative in a semiconductor""" ) elif hole_conc < 0: raise ValueError("""Hole concentration cannot be negative in a semiconductor""" ) elif intrinsic_conc < 0: raise ValueError( """Intrinsic concentration cannot be negative in a semiconductor""" ) elif electron_conc == 0: return ( "electron_conc", intrinsic_conc**2 / hole_conc, ) elif hole_conc == 0: return ( "hole_conc", intrinsic_conc**2 / electron_conc, ) elif intrinsic_conc == 0: return ( "intrinsic_conc", (electron_conc * hole_conc) ** 0.5, ) else: return (-1, -1) if __name__ == "__main__": import doctest doctest.testmod()
635
0
"""simple docstring""" import unittest from transformers import GPTSwaTokenizer from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow from ...test_tokenization_common import TokenizerTesterMixin lowerCAmelCase_ = get_tests_dir('''fixtures/test_sentencepiece_with_bytefallback.model''') @require_sentencepiece @require_tokenizers class _snake_case ( __snake_case , unittest.TestCase ): """simple docstring""" a = GPTSwaTokenizer a = False a = True a = False def _lowerCAmelCase ( self : Optional[int]): """simple docstring""" super().setUp() # We have a SentencePiece fixture for testing _SCREAMING_SNAKE_CASE : Optional[int] = GPTSwaTokenizer(_A , eos_token="""<unk>""" , bos_token="""<unk>""" , pad_token="""<unk>""") tokenizer.save_pretrained(self.tmpdirname) def _lowerCAmelCase ( self : Any , _A : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Any = """This is a test""" _SCREAMING_SNAKE_CASE : Any = """This is a test""" return input_text, output_text def _lowerCAmelCase ( self : Any): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = """<s>""" _SCREAMING_SNAKE_CASE : Dict = 1 self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A) , _A) self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A) , _A) def _lowerCAmelCase ( self : Tuple): """simple docstring""" _SCREAMING_SNAKE_CASE : str = list(self.get_tokenizer().get_vocab().keys()) self.assertEqual(vocab_keys[0] , """<unk>""") self.assertEqual(vocab_keys[1] , """<s>""") self.assertEqual(vocab_keys[-1] , """j""") self.assertEqual(len(_A) , 2_0_0_0) def _lowerCAmelCase ( self : Dict): """simple docstring""" self.assertEqual(self.get_tokenizer().vocab_size , 2_0_0_0) def _lowerCAmelCase ( self : Optional[Any]): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = GPTSwaTokenizer(_A) _SCREAMING_SNAKE_CASE : Optional[int] = tokenizer.tokenize("""This is a test""") self.assertListEqual(_A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""]) self.assertListEqual(tokenizer.convert_tokens_to_ids(_A) , [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2]) _SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""") # fmt: off self.assertListEqual( _A , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""] , ) # fmt: on _SCREAMING_SNAKE_CASE : Any = tokenizer.convert_tokens_to_ids(_A) self.assertListEqual( _A , [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0] , ) _SCREAMING_SNAKE_CASE : str = tokenizer.convert_ids_to_tokens(_A) # fmt: off self.assertListEqual( _A , ["""▁I""", """▁was""", """▁bor""", """n""", """▁in""", """▁""", """<0x39>""", """2""", """0""", """0""", """0""", """,""", """▁and""", """▁this""", """▁is""", """▁f""", """al""", """s""", """<0xC3>""", """<0xA9>""", """."""]) # fmt: on def _lowerCAmelCase ( self : int): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[Any] = GPTSwaTokenizer(_A) _SCREAMING_SNAKE_CASE : int = ["""This is a test""", """I was born in 92000, and this is falsé."""] _SCREAMING_SNAKE_CASE : Tuple = [ [4_6_5, 2_8_7, 2_6_5, 6_3_1, 8_4_2], [2_6_2, 2_7_2, 1_5_2_5, 2_8_6, 2_7_1, 2_6_8, 6_0, 9_1_6, 6_3_3, 6_3_3, 6_3_3, 2_5_9, 2_6_6, 3_0_1, 2_8_7, 3_8_4, 3_6_7, 2_6_3, 1_9_8, 1_7_2, 2_6_0], ] # Test that encode_fast returns the same as 
tokenize + convert_tokens_to_ids for text, expected_ids in zip(_A , _A): self.assertListEqual(tokenizer.encode_fast(_A) , _A) # Test that decode_fast returns the input text for text, token_ids in zip(_A , _A): self.assertEqual(tokenizer.decode_fast(_A) , _A) @slow def _lowerCAmelCase ( self : Dict): """simple docstring""" _SCREAMING_SNAKE_CASE : Optional[int] = [ """<|python|>def fibonacci(n)\n if n < 0:\n print('Incorrect input')""", """Hey there, how are you doing this fine day?""", """This is a text with a trailing spaces followed by a dot .""", """Häj sväjs lillebrör! =)""", """Det är inget fel på Mr. Cool""", ] # fmt: off _SCREAMING_SNAKE_CASE : Optional[int] = {"""input_ids""": [[6_3_4_2_3, 5, 6_8_1_1, 1_4_9_5_4, 2_8_2, 8_1_6, 3_8_2_1, 6_3_4_6_6, 6_3_4_2_5, 6_3_4_6_2, 1_8, 6_3_9_7_8, 6_7_8, 3_0_1, 1_3_2_0, 6_3_4_2_3, 6_3_4_5_5, 6_3_4_5_8, 1_8, 6_3_9_8_2, 4_2_4_6, 3_9_4_0, 1_9_0_1, 4_7_7_8_9, 5_5_4_7, 1_8_9_9_4], [1_9_6_3_0, 1_1_0_0, 6_3_4_4_6, 1_3_4_2, 6_3_3, 5_4_4, 4_4_8_8, 5_9_3, 5_1_0_2, 2_4_1_6, 6_3_4_9_5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_6_5_2, 4_2_8, 2_6_8, 1_9_3_6, 5_1_5, 2_6_8, 5_8_5_9_3, 2_2_4_1_3, 9_1_0_6, 5_4_6, 2_6_8, 3_3_2_1_3, 6_3_9_7_9, 6_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_5_1_3_0, 6_3_4_5_0, 9_2_4, 6_3_4_4_9, 2_2_4_9, 4_0_6_2, 1_5_5_8, 3_1_8, 6_3_5_0_4, 2_1_4_9_8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [5_0_9, 3_7_7, 2_8_2_7, 2_5_5_9, 3_3_2, 6_5_7_5, 6_3_4_4_3, 2_6_8_0_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # fmt: on self.tokenizer_integration_test_util( expected_encoding=_A , model_name="""AI-Sweden/gpt-sw3-126m""" , sequences=_A , )
715
"""simple docstring""" import argparse from typing import List import evaluate import numpy as np import torch from datasets import DatasetDict, load_dataset # New Code # # We'll be using StratifiedKFold for this example from sklearn.model_selection import StratifiedKFold from torch.optim import AdamW from torch.utils.data import DataLoader from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed from accelerate import Accelerator, DistributedType ######################################################################## # This is a fully working simple example to use Accelerate, # specifically showcasing how to perform Cross Validation, # and builds off the `nlp_example.py` script. # # This example trains a Bert base model on GLUE MRPC # in any of the following settings (with the same script): # - single CPU or single GPU # - multi GPUS (using PyTorch distributed mode) # - (multi) TPUs # - fp16 (mixed-precision) or fp32 (normal precision) # # To help focus on the differences in the code, building `DataLoaders` # was refactored into its own function. # New additions from the base script can be found quickly by # looking for the # New Code # tags # # To run it in each of these various modes, follow the instructions # in the readme for examples: # https://github.com/huggingface/accelerate/tree/main/examples # ######################################################################## lowerCAmelCase_ = 16 lowerCAmelCase_ = 32 def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 )-> str: _SCREAMING_SNAKE_CASE : int = AutoTokenizer.from_pretrained("""bert-base-cased""" ) _SCREAMING_SNAKE_CASE : Union[str, Any] = DatasetDict( { """train""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """validation""": dataset["""train"""].select(__SCREAMING_SNAKE_CASE ), """test""": dataset["""validation"""], } ) def tokenize_function(__SCREAMING_SNAKE_CASE ): # max_length=None => use the model max length (it's actually the default) _SCREAMING_SNAKE_CASE : Union[str, Any] = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE ) return outputs # Apply the method we just defined to all the examples in all the splits of the dataset # starting with the main process first: with accelerator.main_process_first(): _SCREAMING_SNAKE_CASE : str = datasets.map( __SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=["""idx""", """sentence1""", """sentence2"""] , ) # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the # transformers library _SCREAMING_SNAKE_CASE : Any = tokenized_datasets.rename_column("""label""" , """labels""" ) def collate_fn(__SCREAMING_SNAKE_CASE ): # On TPU it's best to pad everything to the same length or training will be very slow. 
_SCREAMING_SNAKE_CASE : Any = 128 if accelerator.distributed_type == DistributedType.TPU else None # When using mixed precision we want round multiples of 8/16 if accelerator.mixed_precision == "fp8": _SCREAMING_SNAKE_CASE : Optional[Any] = 16 elif accelerator.mixed_precision != "no": _SCREAMING_SNAKE_CASE : Any = 8 else: _SCREAMING_SNAKE_CASE : Optional[int] = None return tokenizer.pad( __SCREAMING_SNAKE_CASE , padding="""longest""" , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , ) # Instantiate dataloaders. _SCREAMING_SNAKE_CASE : int = DataLoader( tokenized_datasets["""train"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Optional[int] = DataLoader( tokenized_datasets["""validation"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = DataLoader( tokenized_datasets["""test"""] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE ) return train_dataloader, eval_dataloader, test_dataloader def lowerCamelCase_(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )-> Dict: # New Code # _SCREAMING_SNAKE_CASE : Union[str, Any] = [] # Download the dataset _SCREAMING_SNAKE_CASE : Union[str, Any] = load_dataset("""glue""" , """mrpc""" ) # Create our splits _SCREAMING_SNAKE_CASE : Dict = StratifiedKFold(n_splits=int(args.num_folds ) ) # Initialize accelerator _SCREAMING_SNAKE_CASE : Any = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision ) # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs _SCREAMING_SNAKE_CASE : Tuple = config["""lr"""] _SCREAMING_SNAKE_CASE : Tuple = int(config["""num_epochs"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""seed"""] ) _SCREAMING_SNAKE_CASE : int = int(config["""batch_size"""] ) _SCREAMING_SNAKE_CASE : List[str] = evaluate.load("""glue""" , """mrpc""" ) # If the batch size is too big we use gradient accumulation _SCREAMING_SNAKE_CASE : Any = 1 if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU: _SCREAMING_SNAKE_CASE : List[str] = batch_size // MAX_GPU_BATCH_SIZE _SCREAMING_SNAKE_CASE : List[str] = MAX_GPU_BATCH_SIZE set_seed(__SCREAMING_SNAKE_CASE ) # New Code # # Create our folds: _SCREAMING_SNAKE_CASE : List[str] = kfold.split(np.zeros(datasets["""train"""].num_rows ) , datasets["""train"""]["""label"""] ) _SCREAMING_SNAKE_CASE : Optional[Any] = [] # Iterate over them for i, (train_idxs, valid_idxs) in enumerate(__SCREAMING_SNAKE_CASE ): _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = get_fold_dataloaders( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) # Instantiate the model (we build the model here so that the seed also control new weights initialization) _SCREAMING_SNAKE_CASE : Any = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=__SCREAMING_SNAKE_CASE ) # We could avoid this line since the accelerator is set with `device_placement=True` (default value). # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that). 
_SCREAMING_SNAKE_CASE : Tuple = model.to(accelerator.device ) # Instantiate optimizer _SCREAMING_SNAKE_CASE : int = AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE ) # Instantiate scheduler _SCREAMING_SNAKE_CASE : int = get_linear_schedule_with_warmup( optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps , ) # Prepare everything # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the # prepare method. _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Union[str, Any] = accelerator.prepare( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) # Now we train the model for epoch in range(__SCREAMING_SNAKE_CASE ): model.train() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) _SCREAMING_SNAKE_CASE : Optional[Any] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : Dict = outputs.loss _SCREAMING_SNAKE_CASE : List[Any] = loss / gradient_accumulation_steps accelerator.backward(__SCREAMING_SNAKE_CASE ) if step % gradient_accumulation_steps == 0: optimizer.step() lr_scheduler.step() optimizer.zero_grad() model.eval() for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : str = outputs.logits.argmax(dim=-1 ) _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) metric.add_batch( predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , ) _SCREAMING_SNAKE_CASE : Optional[int] = metric.compute() # Use accelerator.print to print only on the main process. accelerator.print(F"""epoch {epoch}:""" , __SCREAMING_SNAKE_CASE ) # New Code # # We also run predictions on the test set at the very end _SCREAMING_SNAKE_CASE : str = [] for step, batch in enumerate(__SCREAMING_SNAKE_CASE ): # We could avoid this line since we set the accelerator with `device_placement=True`. batch.to(accelerator.device ) with torch.no_grad(): _SCREAMING_SNAKE_CASE : List[str] = model(**__SCREAMING_SNAKE_CASE ) _SCREAMING_SNAKE_CASE : List[Any] = outputs.logits _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : List[Any] = accelerator.gather_for_metrics((predictions, batch["""labels"""]) ) fold_predictions.append(predictions.cpu() ) if i == 0: # We need all of the test predictions test_references.append(references.cpu() ) # Use accelerator.print to print only on the main process. 
test_predictions.append(torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) ) # We now need to release all our memory and get rid of the current model, optimizer, etc accelerator.free_memory() # New Code # # Finally we check the accuracy of our folded results: _SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(__SCREAMING_SNAKE_CASE , dim=0 ) _SCREAMING_SNAKE_CASE : List[str] = torch.stack(__SCREAMING_SNAKE_CASE , dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 ) _SCREAMING_SNAKE_CASE : int = metric.compute(predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE ) accelerator.print("""Average test metrics from all folds:""" , __SCREAMING_SNAKE_CASE ) def lowerCamelCase_()-> Optional[Any]: _SCREAMING_SNAKE_CASE : Any = argparse.ArgumentParser(description="""Simple example of training script.""" ) parser.add_argument( """--mixed_precision""" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose""" """between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.""" """and an Nvidia Ampere GPU.""" , ) parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" ) # New Code # parser.add_argument("""--num_folds""" , type=__SCREAMING_SNAKE_CASE , default=3 , help="""The number of splits to perform across the dataset""" ) _SCREAMING_SNAKE_CASE : List[Any] = parser.parse_args() _SCREAMING_SNAKE_CASE : Optional[int] = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16} training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) if __name__ == "__main__": main()
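# Sketch of the fold-ensembling step above: per-fold test logits are stacked,
# mean-reduced across folds, and argmax'd once at the end (the tensors below
# are illustrative stand-ins for real model outputs).
import torch

fold_logits = [torch.tensor([[2.0, 1.0]]), torch.tensor([[0.0, 3.0]]), torch.tensor([[1.0, 1.0]])]
ensembled = torch.stack(fold_logits, dim=0).sum(dim=0).div(3).argmax(dim=-1)
assert ensembled.item() == 1  # mean logits (1.0, 5/3) favour class 1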
635
0