| code (string, 87–55.2k chars) | code_codestyle (int64, 0–349) | style_context (string, 135–49.1k chars) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
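Each row below pairs a `code` cell with a `style_context` cell and a binary `label`; the two `*_codestyle` columns appear to be integer style ids. A minimal sketch for inspecting such a dump with the `datasets` library (the repo id is a hypothetical placeholder, not the actual source of this table):

```python
from datasets import load_dataset

# "user/code-style-pairs" is a placeholder repo id for illustration only.
ds = load_dataset("user/code-style-pairs", split="train")
row = ds[0]
print(row["code_codestyle"], row["style_context_codestyle"], row["label"])
print(row["code"][:200])  # first 200 characters of the code cell
```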
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250004
RO_CODE = 250020
@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1054)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1054)
    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        # fmt: off
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        # fmt: on
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        # fmt: off
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
        # fmt: on
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
lowercase_ = {'''input_ids''': [[2_5_0_0_0_4, 1_1_0_6_2, 8_2_7_7_2, 7, 1_5, 8_2_7_7_2, 5_3_8, 5_1_5_2_9, 2_3_7, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 2_1_5_1_7_5, 1_3_1_4, 1_3_6, 1_7_1_9_8, 1_2_9_0, 2_0_6, 9, 5_6_3_5_9, 4_2, 1_2_2_0_0_9, 9, 1_6_4_6_6, 1_6, 8_7_3_4_4, 4_5_3_7, 9, 4_7_1_7, 7_8_3_8_1, 6, 1_5_9_9_5_8, 7, 1_5, 2_4_4_8_0, 6_1_8, 4, 5_2_7, 2_2_6_9_3, 5_4_2_8, 4, 2_7_7_7, 2_4_4_8_0, 9_8_7_4, 4, 4_3_5_2_3, 5_9_4, 4, 8_0_3, 1_8_3_9_2, 3_3_1_8_9, 1_8, 4, 4_3_5_2_3, 2_4_4_4_7, 1_2_3_9_9, 1_0_0, 2_4_9_5_5, 8_3_6_5_8, 9_6_2_6, 1_4_4_0_5_7, 1_5, 8_3_9, 2_2_3_3_5, 1_6, 1_3_6, 2_4_9_5_5, 8_3_6_5_8, 8_3_4_7_9, 1_5, 3_9_1_0_2, 7_2_4, 1_6, 6_7_8, 6_4_5, 2_7_8_9, 1_3_2_8, 4_5_8_9, 4_2, 1_2_2_0_0_9, 1_1_5_7_7_4, 2_3, 8_0_5, 1_3_2_8, 4_6_8_7_6, 7, 1_3_6, 5_3_8_9_4, 1_9_4_0, 4_2_2_2_7, 4_1_1_5_9, 1_7_7_2_1, 8_2_3, 4_2_5, 4, 2_7_5_1_2, 9_8_7_2_2, 2_0_6, 1_3_6, 5_5_3_1, 4_9_7_0, 9_1_9, 1_7_3_3_6, 5, 2], [2_5_0_0_0_4, 2_0_0_8_0, 6_1_8, 8_3, 8_2_7_7_5, 4_7, 4_7_9, 9, 1_5_1_7, 7_3, 5_3_8_9_4, 3_3_3, 8_0_5_8_1, 1_1_0_1_1_7, 1_8_8_1_1, 5_2_5_6, 1_2_9_5, 5_1, 1_5_2_5_2_6, 2_9_7, 7_9_8_6, 3_9_0, 1_2_4_4_1_6, 5_3_8, 3_5_4_3_1, 2_1_4, 9_8, 1_5_0_4_4, 2_5_7_3_7, 1_3_6, 7_1_0_8, 4_3_7_0_1, 2_3, 7_5_6, 1_3_5_3_5_5, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [2_5_0_0_0_4, 5_8_1, 6_3_7_7_3, 1_1_9_4_5_5, 6, 1_4_7_7_9_7, 8_8_2_0_3, 7, 6_4_5, 7_0, 2_1, 3_2_8_5, 1_0_2_6_9, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowercase_,  # the expected-encoding fixture defined above
            model_name="facebook/mbart-large-50",
            revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )
    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                    # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)
@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        ' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8274, 127873, 25916, 7, 8622, 2071, 438, 67485, 53, 187895, 23, 51712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250038)
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9019, 96, 9, 916, 86792, 36, 18743, 15596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250053, 250001])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250004, 62, 3034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250001,
            },
        )
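For context on the `shift_tokens_right` assertions above: the mBART variant rotates the labels one position to the right, moving the final non-pad token (the eos) to the front, so decoder inputs read `[eos, lang_code, tokens...]`. Below is a simplified re-implementation for illustration only; the canonical version lives in `transformers.models.mbart.modeling_mbart`:

```python
import torch


def shift_tokens_right_sketch(input_ids: torch.Tensor, pad_token_id: int) -> torch.Tensor:
    """Rotate each row one position right, moving the last non-pad token to the front.

    For MBart-50 labels of the form [lang_code, tokens..., eos], this yields
    decoder inputs of the form [eos, lang_code, tokens...].
    """
    prev_output_tokens = input_ids.clone()
    # index of the last non-pad token in each row (the eos token)
    index_of_eos = (prev_output_tokens.ne(pad_token_id).sum(dim=1) - 1).unsqueeze(-1)
    decoder_start_tokens = prev_output_tokens.gather(1, index_of_eos).squeeze(-1)
    prev_output_tokens[:, 1:] = prev_output_tokens[:, :-1].clone()
    prev_output_tokens[:, 0] = decoder_start_tokens
    return prev_output_tokens


# labels: [RO_CODE, ..., eos] -> decoder inputs: [eos, RO_CODE, ...]
labels = torch.tensor([[250020, 884, 9019, 2]])
assert shift_tokens_right_sketch(labels, pad_token_id=1).tolist() == [[2, 250020, 884, 9019]]
```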
| 30
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry
def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path
def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)
    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()
def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
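The compression-filesystem pattern tested above can be exercised end to end as follows. This sketch assumes `datasets` is installed, since importing `datasets.filesystems` is what registers the `gzip://` protocol with fsspec:

```python
import gzip
import os
import tempfile

import fsspec
import datasets.filesystems  # noqa: F401  # registers gzip/bz2/... compression filesystems

# Write a gzip file, then read the decompressed content back through fsspec.
tmpdir = tempfile.mkdtemp()
gz_path = os.path.join(tmpdir, "dataset.txt.gz")
with gzip.open(gz_path, "wt", encoding="utf-8") as f:
    f.write("hello world\n")

fs = fsspec.filesystem("gzip", fo=gz_path)
# The archive's content is exposed under the file name minus its extension.
assert fs.glob("*") == ["dataset.txt"]
with fs.open("dataset.txt", "r", encoding="utf-8") as f:
    assert f.read() == "hello world\n"
```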
| 308
| 0
|
"""simple docstring"""
import unittest
from transformers import DebertaV2Tokenizer, DebertaV2TokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class DebertaV2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = DebertaV2Tokenizer
    rust_tokenizer_class = DebertaV2TokenizerFast
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, unk_token="<unk>")
        tokenizer.save_pretrained(self.tmpdirname)
    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text
    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "[PAD]")
        self.assertEqual(len(vocab_keys), 30001)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)
    def test_do_lower_case(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁hello", "!", "how", "▁are", "▁you", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_convert_tokens_to_string(self):
        pass

    @unittest.skip("There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.")
    def test_sentencepiece_tokenize_and_decode(self):
        pass
    def test_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_split_by_punct_false(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=True, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct(self):
        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", "▁", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "▁", ".", ]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=True)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_do_lower_case_false_split_by_punct_false(self):
        # fmt: off
        sequence = " \tHeLLo!how  \n Are yoU?  "
        tokens_target = ["▁", "<unk>", "e", "<unk>", "o", "!", "how", "▁", "<unk>", "re", "▁yo", "<unk>", "?"]
        # fmt: on

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, tokens_target)

        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, do_lower_case=False, split_by_punct=False)
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(rust_tokens, tokens_target)
    def test_rust_and_python_full_tokenizers(self):
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.convert_ids_to_tokens(tokenizer.encode(sequence, add_special_tokens=False))
        rust_tokens = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(sequence, add_special_tokens=False))
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    def test_full_tokenizer(self):
        sequence = "This is a test"
        ids_target = [13, 1, 4398, 25, 21, 1289]
        tokens_target = ["▁", "T", "his", "▁is", "▁a", "▁test"]
        back_tokens_target = ["▁", "<unk>", "his", "▁is", "▁a", "▁test"]

        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB, keep_accents=True)
        rust_tokenizer = DebertaV2TokenizerFast(SAMPLE_VOCAB, keep_accents=True)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)

        # fmt: off
        sequence = "I was born in 92000, and this is falsé."
        ids_target = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
        tokens_target = ["▁", "I", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", ".", ]
        back_tokens_target = ["▁", "<unk>", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", ".", ]
        # fmt: on

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, ids_target)
        tokens = tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, tokens_target)
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(back_tokens, back_tokens_target)

        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(rust_ids, ids_target)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(rust_tokens, tokens_target)
        rust_back_tokens = rust_tokenizer.convert_ids_to_tokens(rust_ids)
        self.assertListEqual(rust_back_tokens, back_tokens_target)
    def test_sequence_builders(self):
        tokenizer = DebertaV2Tokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id], encoded_sentence)
        self.assertEqual(
            [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [tokenizer.sep_token_id],
            encoded_pair,
        )
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
UpperCAmelCase : List[str] = {'''input_ids''': [[1, 39867, 36, 19390, 486, 27, 35052, 81436, 18, 60685, 1225, 7, 35052, 81436, 18, 9367, 16899, 18, 15937, 53, 594, 773, 18, 16287, 30465, 36, 15937, 6, 41139, 38, 36979, 60763, 191, 6, 34132, 99, 6, 50538, 390, 43230, 6, 34132, 2779, 20850, 14, 699, 1072, 1194, 36, 382, 10901, 53, 7, 699, 1072, 2084, 36, 20422, 630, 53, 19, 105, 3049, 1896, 1053, 16899, 1506, 11, 37978, 4243, 7, 1237, 31869, 200, 16566, 654, 6, 35052, 81436, 7, 55630, 13593, 4, 2], [1, 26, 15011, 13, 667, 8, 1053, 18, 23611, 1237, 72356, 12820, 34, 104134, 1209, 35, 13313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 15785, 14951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase,  # the expected-encoding fixture defined above
            model_name="microsoft/deberta-v2-xlarge",
            revision="ad6e42c1532ddf3a15c39246b63f5559d558b670",
        )
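To make the flag combinations above concrete, a short usage sketch; the vocab path is a placeholder for any SentencePiece model file:

```python
from transformers import DebertaV2Tokenizer

text = "I was born in 92000, and this is falsé."

# "spm.model" is a placeholder path to a SentencePiece model file.
default_tok = DebertaV2Tokenizer("spm.model")
punct_tok = DebertaV2Tokenizer("spm.model", do_lower_case=True, split_by_punct=True)

print(default_tok.tokenize(text))  # punctuation stays attached to neighboring pieces
print(punct_tok.tokenize(text))    # lowercased, punctuation split into standalone tokens
```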
| 109
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """
        Checks whether there might be something wrong with the given input with regard to the model.
        """
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or of type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwarg
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be smaller than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
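A short usage sketch of the task-name parsing implemented in `_sanitize_parameters` above: a task id of the form `translation_{src}_to_{tgt}` is split on `_`, and its second and fourth fields become `src_lang`/`tgt_lang`:

```python
from transformers import pipeline

# "translation_en_to_fr" is parsed into src_lang="en", tgt_lang="fr".
translator = pipeline("translation_en_to_fr", model="t5-small")
print(translator("How old are you?", max_length=40))
# -> [{'translation_text': '...'}]  (exact text depends on the model)
```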
| 308
| 0
|
from collections import namedtuple

import requests
from lxml import html  # type: ignore

covid_data = namedtuple("covid_data", "cases deaths recovered")


def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> covid_data:
    xpath_str = '//div[@class = "maincounter-number"]/span/text()'
    return covid_data(*html.fromstring(requests.get(url).content).xpath(xpath_str))


fmt = (
    "Total COVID-19 cases in the world: {}\n"
    "Total deaths due to COVID-19 in the world: {}\n"
    "Total COVID-19 patients recovered in the world: {}"
)
print(fmt.format(*covid_stats()))
| 12
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_streaming = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None

    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )
        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if it's a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])

    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}

    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict

    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expect the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data

    def delete_extracted_files(self):
        pass

    def manage_extracted_files(self):
        pass

    def iter_archive(self, path):
        def _iter_archive_members(path):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file).parent
            relative_path = path.relative_to(dummy_parent_path)
            with ZipFile(self.local_path_to_dummy_data) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix()):
                    yield dummy_parent_path.joinpath(member)

        path = Path(path)
        file_paths = _iter_archive_members(path) if self.use_local_dummy_data else path.rglob("*")
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__")):
                yield file_path.relative_to(path).as_posix(), file_path.open("rb")

    def iter_files(self, paths):
        if not isinstance(paths, list):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path):
                if os.path.basename(path).startswith((".", "__")):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path):
                    if os.path.basename(dirpath).startswith((".", "__")):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames):
                        if filename.startswith((".", "__")):
                            continue
                        yield os.path.join(dirpath, filename)
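To illustrate the key-to-filename convention used by the `create_dummy_data_*` methods above (each URL maps to a file inside the dummy-data folder named after its quote_plus-encoded last path segment), a self-contained stdlib sketch:

```python
import urllib.parse
from pathlib import Path


def dummy_member_name(url: str) -> str:
    # last path segment of the URL, with query characters percent-encoded
    return urllib.parse.quote_plus(Path(url).name)


assert dummy_member_name("https://host/data/train.csv?rev=2") == "train.csv%3Frev%3D2"
```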
| 308
| 0
|
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained("google/canine-s")

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
    @require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Life is like a box of chocolates.", "You never know what you're gonna get."]
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
    @require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ["Once there was a man.", "He wrote a test in HuggingFace Transformers."]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn("input_ids", batch)
        self.assertIn("attention_mask", batch)
        self.assertIn("token_type_ids", batch)
    @require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            "What's the weather?",
            "It's about 25 degrees.",
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors="pt"
        )
        self.assertEqual(32, targets["input_ids"].shape[1])
    # cannot use default `test_save_and_load_tokenizer` because tokenizer has no vocabulary
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = " He is very happy, UNwant\u00E9d,running"

                additional_special_tokens = tokenizer.additional_special_tokens

                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({"additional_special_tokens": additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_add_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input_text, ids = self.get_clean_sequence(tokenizer)

                # a special token for Canine can be defined as follows:
                SPECIAL_TOKEN = 0xE005
                special_token = chr(SPECIAL_TOKEN)

                tokenizer.add_special_tokens({"cls_token": special_token})
                encoded_special_token = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(len(encoded_special_token), 1)

                text = tokenizer.decode(ids + encoded_special_token, clean_up_tokenization_spaces=False)
                encoded = tokenizer.encode(text, add_special_tokens=False)

                input_encoded = tokenizer.encode(input_text, add_special_tokens=False)
                special_token_id = tokenizer.encode(special_token, add_special_tokens=False)
                self.assertEqual(encoded, input_encoded + special_token_id)

                decoded = tokenizer.decode(encoded, skip_special_tokens=True)
                self.assertTrue(special_token not in decoded)
    def test_tokenize_special_tokens(self):
        tokenizers = self.get_tokenizers(do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                SPECIAL_TOKEN_1 = chr(0xE005)
                SPECIAL_TOKEN_2 = chr(0xE006)

                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1], special_tokens=True)
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({"additional_special_tokens": [SPECIAL_TOKEN_2]})

                token_1 = tokenizer.tokenize(SPECIAL_TOKEN_1)
                token_2 = tokenizer.tokenize(SPECIAL_TOKEN_2)

                self.assertEqual(len(token_1), 1)
                self.assertEqual(len(token_2), 1)
                self.assertEqual(token_1[0], SPECIAL_TOKEN_1)
                self.assertEqual(token_2[0], SPECIAL_TOKEN_2)
    @require_tokenizers
    def test_added_token_serializable(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token = chr(NEW_TOKEN)

                new_token = AddedToken(new_token, lstrip=True)
                tokenizer.add_special_tokens({"additional_special_tokens": [new_token]})

                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(tmp_dir_name)
                    tokenizer.from_pretrained(tmp_dir_name)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))

        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), encoding="utf-8") as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, "tokenizer_config.json"), encoding="utf-8") as json_file:
                    tokenizer_config = json.load(json_file)

                # a special token for Canine can be defined as follows:
                NEW_TOKEN = 0xE006
                new_token_1 = chr(NEW_TOKEN)

                special_tokens_map["additional_special_tokens"] = [new_token_1]
                tokenizer_config["additional_special_tokens"] = [new_token_1]

                with open(os.path.join(tmp_dir, "special_tokens_map.json"), "w", encoding="utf-8") as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, "tokenizer_config.json"), "w", encoding="utf-8") as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir, extra_ids=0)
                self.assertIn(new_token_1, tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_1],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_1])
                    ),
                )

                NEW_TOKEN = 0xE007
                new_token_2 = chr(NEW_TOKEN)
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = [AddedToken(new_token_2, lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens, extra_ids=0
                )

                self.assertIn(new_token_2, tokenizer.additional_special_tokens)
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_2], tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_2]))
                )
    @require_tokenizers
    def test_encode_decode_with_spaces(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                input = "hello world"
                if self.space_between_special_tokens:
                    output = "[CLS] hello world [SEP]"
                else:
                    output = input
                encoded = tokenizer.encode(input, add_special_tokens=False)
                decoded = tokenizer.decode(encoded, spaces_between_special_tokens=self.space_between_special_tokens)
                self.assertIn(decoded, [output, output.lower()])
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                attributes_list = [
                    "bos_token",
                    "eos_token",
                    "unk_token",
                    "sep_token",
                    "pad_token",
                    "cls_token",
                    "mask_token",
                ]
                token_to_test_setters = "a"
                token_id_to_test_setters = ord(token_to_test_setters)
                for attr in attributes_list:
                    setattr(tokenizer, attr + "_id", None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), None)
                    setattr(tokenizer, attr + "_id", token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + "_id"), token_id_to_test_setters)
                setattr(tokenizer, "additional_special_tokens_ids", [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [])
                additional_special_token_id = 0xE006
                additional_special_token = chr(additional_special_token_id)
                setattr(tokenizer, "additional_special_tokens_ids", [additional_special_token_id])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens"), [additional_special_token])
                self.assertListEqual(getattr(tokenizer, "additional_special_tokens_ids"), [additional_special_token_id])
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Union[str, Any] ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Optional[Any] ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Any ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :Dict ):
'''simple docstring'''
pass
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
pass
def bfs(graph, source, sink, parent) -> bool:
    """Breadth-first search for an augmenting path from source to sink."""
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]


def ford_fulkerson(graph, source, sink) -> int:
    """Edmonds-Karp style max flow: augment along BFS paths until none remain."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # update residual capacities of the edges and reverse edges
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
"""simple docstring"""
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
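

# Worked example (illustrative values): at 60 degrees, cos(60)^2 = 0.25, so an
# initial intensity of 100.0 transmits approximately 25.0 through the analyzer.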
if __name__ == "__main__":
import doctest
doctest.testmod(name="malus_law")
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "openbmb/cpm-ant-10b": 1024,
}
def load_vocab(vocab_file) -> collections.OrderedDict:
    """Loads a vocabulary file into an ordered token -> index dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # greedy longest-match-first: shrink the window until a vocab entry is found
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
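

# A minimal illustration of the greedy longest-match-first loop above
# (toy vocabulary, assumed purely for demonstration):
#     tokenizer = WordpieceTokenizer(vocab={"foo": 0, "fo": 1, "o": 2}, unk_token="<unk>")
#     tokenizer.tokenize("fooo")  # -> ["foo", "o"]
#     tokenizer.tokenize("bar")   # -> ["<unk>", "<unk>", "<unk>"]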
class CpmAntTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    add_prefix_space = False

    def __init__(self, vocab_file, bod_token="<d>", eod_token="</d>", bos_token="<s>", eos_token="</s>", pad_token="<pad>", unk_token="<unk>", line_token="</n>", space_token="</_>", padding_side="left", **kwargs):
        requires_backends(self, ["jieba"])
        super().__init__(
            bod_token=bod_token, eod_token=eod_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, unk_token=unk_token, line_token=line_token, space_token=space_token, padding_side=padding_side, **kwargs, )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(vocab_file)
        self.encoder[" "] = self.encoder[space_token]
        self.encoder["\n"] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder, unk_token=self.unk_token)
    @property
    def bod_token_id(self):
        return self.encoder[self.bod_token]

    @property
    def eod_token_id(self):
        return self.encoder[self.eod_token]

    @property
    def newline_id(self):
        return self.encoder["\n"]

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def _tokenize(self, text):
        """Tokenize a string with jieba, then refine with the wordpiece tokenizer."""
        output_tokens = []
        for x in jieba.cut(text, cut_all=False):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x))
        return output_tokens

    def _decode(self, token_ids, **kwargs):
        token_ids = [i for i in token_ids if i >= 0]
        token_ids = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(token_ids, **kwargs)
    def check(self, token):
        return token in self.encoder

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        return "".join(tokens)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        index = 0
        if " " in self.encoder:
            self.encoder["</_>"] = self.encoder[" "]
            del self.encoder[" "]
        if "\n" in self.encoder:
            self.encoder["</n>"] = self.encoder["\n"]
            del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items(), key=lambda x: x[1]))
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: List[int] = None) -> List[int]:
        if token_ids_1 is None:
            return [self.bos_token_id] + token_ids_0
        return [self.bos_token_id] + token_ids_0 + [self.bos_token_id] + token_ids_1

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1))
        return [1] + ([0] * len(token_ids_0))
"""simple docstring"""
from __future__ import annotations
def shear_stress(stress: float, tangential_force: float, area: float) -> tuple[str, float]:
    if (stress, tangential_force, area).count(0) != 1:
        raise ValueError("You cannot supply more or less than 2 values")
    elif stress < 0:
        raise ValueError("Stress cannot be negative")
    elif tangential_force < 0:
        raise ValueError("Tangential Force cannot be negative")
    elif area < 0:
        raise ValueError("Area cannot be negative")
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )
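

# Worked example (illustrative values): with stress == 0, a tangential force of
# 25 N over an area of 5 m^2 yields ("stress", 5.0), i.e. tau = F / A.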
if __name__ == "__main__":
import doctest
doctest.testmod()
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return
def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
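
# Example invocation (script name and prompt are placeholders):
#     python retrieve.py --class_prompt "photo of a dog" --class_data_dir ./class_data --num_class_images 200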
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {
    "configuration_gpt_neo": ["GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTNeoConfig", "GPTNeoOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neo"] = [
"GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTNeoForCausalLM",
"GPTNeoForQuestionAnswering",
"GPTNeoForSequenceClassification",
"GPTNeoForTokenClassification",
"GPTNeoModel",
"GPTNeoPreTrainedModel",
"load_tf_weights_in_gpt_neo",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_gpt_neo"] = [
"FlaxGPTNeoForCausalLM",
"FlaxGPTNeoModel",
"FlaxGPTNeoPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
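
# Illustrative effect of the lazy structure above (assumed usage): importing a
# config class resolves without pulling in torch, while touching a model class
# triggers the deferred import of the torch-backed submodule.
#     from transformers.models.gpt_neo import GPTNeoConfig  # lightweight
#     from transformers.models.gpt_neo import GPTNeoModel   # loads modeling_gpt_neo lazily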
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
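
# Example invocation (script path and arguments are placeholders):
#     python xla_spawn.py --num_cores 8 path/to/training_script.py --per_device_train_batch_size 8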
"""simple docstring"""
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
_a = """\\n Text data.\n Second line of data."""
_a = """file"""
@pytest.fixture(scope='''session''' )
def lowerCamelCase__ ( __snake_case ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
_UpperCamelCase = bytes(__snake_case, '''utf-8''' )
with zstd.open(__snake_case, '''wb''' ) as f:
f.write(__snake_case )
return path
@pytest.fixture
def lowerCamelCase__ ( __snake_case ) -> Optional[Any]:
"""simple docstring"""
with open(os.path.join(tmpfs.local_root_dir, __snake_case ), '''w''' ) as f:
f.write(__snake_case )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''', ['''gzip''', '''xz''', '''zstd'''] )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> List[str]:
"""simple docstring"""
_UpperCamelCase = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
_UpperCamelCase = input_paths[compression_format]
_UpperCamelCase = tmp_path / '''cache'''
_UpperCamelCase = DownloadConfig(cache_dir=__snake_case, extract_compressed_file=__snake_case )
_UpperCamelCase = cached_path(__snake_case, download_config=__snake_case )
with open(__snake_case ) as f:
_UpperCamelCase = f.read()
with open(__snake_case ) as f:
_UpperCamelCase = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''', [True, False] )
@pytest.mark.parametrize('''default_cache_dir''', [True, False] )
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = '''custom_cache'''
_UpperCamelCase = '''custom_extracted_dir'''
_UpperCamelCase = tmp_path / '''custom_extracted_path'''
if default_extracted:
_UpperCamelCase = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''', __snake_case )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''', str(__snake_case ) )
_UpperCamelCase = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_UpperCamelCase = xz_file
_UpperCamelCase = (
DownloadConfig(extract_compressed_file=__snake_case )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir, extract_compressed_file=__snake_case )
)
_UpperCamelCase = cached_path(__snake_case, download_config=__snake_case )
assert Path(__snake_case ).parent.parts[-2:] == expected
def lowerCamelCase__ ( __snake_case ) -> Any:
"""simple docstring"""
_UpperCamelCase = str(Path(__snake_case ).resolve() )
assert cached_path(__snake_case ) == text_file
# relative path
_UpperCamelCase = str(Path(__snake_case ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(__snake_case ) == text_file
def lowerCamelCase__ ( __snake_case ) -> int:
"""simple docstring"""
_UpperCamelCase = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(__snake_case ):
cached_path(__snake_case )
# relative path
_UpperCamelCase = '''./__missing_file__.txt'''
with pytest.raises(__snake_case ):
cached_path(__snake_case )
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = get_from_cache(F'''tmp://{tmpfs_file}''' )
with open(__snake_case ) as f:
_UpperCamelCase = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''', __snake_case )
def lowerCamelCase__ ( ) -> int:
"""simple docstring"""
with pytest.raises(__snake_case ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''', __snake_case )
def lowerCamelCase__ ( __snake_case ) -> Tuple:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__snake_case ):
http_get('''https://huggingface.co''', temp_file=__snake_case )
with pytest.raises(__snake_case ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''', __snake_case )
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__snake_case ):
ftp_get('''ftp://huggingface.co''', temp_file=__snake_case )
with pytest.raises(__snake_case ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''', __snake_case )
def lowerCamelCase__ ( __snake_case ) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(__snake_case ):
fsspec_get('''s3://huggingface.co''', temp_file=__snake_case )
with pytest.raises(__snake_case ):
fsspec_head('''s3://huggingface.co''' )
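

# Note: the @patch decorators above flip `datasets.config.HF_DATASETS_OFFLINE`
# for the duration of each test, so every network-touching helper is expected
# to raise OfflineModeIsEnabled instead of opening a connection.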
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def make_batched(videos) -> List[List[ImageInput]]:
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f"Could not make batched video from {videos}")
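
# Behaviour of make_batched, sketched (inputs assumed for illustration): a single
# image becomes [[image]], a list of frames becomes [frames], and an already
# batched list of videos is returned unchanged.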
class VideoMAEImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        videos = make_batched(videos)
        videos = [
            [
                self._preprocess_image(
                    image=img, do_resize=do_resize, size=size, resample=resample, do_center_crop=do_center_crop, crop_size=crop_size, do_rescale=do_rescale, rescale_factor=rescale_factor, do_normalize=do_normalize, image_mean=image_mean, image_std=image_std, data_format=data_format, )
                for img in video
            ]
            for video in videos
        ]
        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
'''simple docstring'''
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all character n-grams of size `ngram_size` from `sentence`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
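

# Worked example (illustrative input): with ngram_size=3, "abcde" yields
# ["abc", "bcd", "cde"].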
if __name__ == "__main__":
from doctest import testmod
testmod()
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
english_letter_freq = {
"E": 12.70,
"T": 9.06,
"A": 8.17,
"O": 7.51,
"I": 6.97,
"N": 6.75,
"S": 6.33,
"H": 6.09,
"R": 5.99,
"D": 4.25,
"L": 4.03,
"C": 2.78,
"U": 2.76,
"M": 2.41,
"W": 2.36,
"F": 2.23,
"G": 2.02,
"Y": 1.97,
"P": 1.93,
"B": 1.29,
"V": 0.98,
"K": 0.77,
"J": 0.15,
"X": 0.15,
"Q": 0.10,
"Z": 0.07,
}
ETAOIN = "ETAOINSHRDLCUMWFGYPBVKJXQZ"
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def get_letter_count(message: str) -> dict[str, int]:
    letter_count = {letter: 0 for letter in string.ascii_uppercase}
    for letter in message.upper():
        if letter in LETTERS:
            letter_count[letter] += 1
    return letter_count


def get_item_at_index_zero(x: tuple) -> str:
    return x[0]


def get_frequency_order(message: str) -> str:
    letter_to_freq = get_letter_count(message)
    freq_to_letter: dict[int, list[str]] = {
        freq: [] for letter, freq in letter_to_freq.items()
    }
    for letter in LETTERS:
        freq_to_letter[letter_to_freq[letter]].append(letter)
    freq_to_letter_str: dict[int, str] = {}
    for freq in freq_to_letter:
        freq_to_letter[freq].sort(key=ETAOIN.find, reverse=True)
        freq_to_letter_str[freq] = "".join(freq_to_letter[freq])
    freq_pairs = list(freq_to_letter_str.items())
    freq_pairs.sort(key=get_item_at_index_zero, reverse=True)
    sorted_freq_to_letter = [freq_pair[1] for freq_pair in freq_pairs]
    return "".join(sorted_freq_to_letter)


def english_freq_match_score(message: str) -> int:
    freq_order = get_frequency_order(message)
    match_score = 0
    for common_letter in ETAOIN[:6]:
        if common_letter in freq_order[:6]:
            match_score += 1
    for uncommon_letter in ETAOIN[-6:]:
        if uncommon_letter in freq_order[-6:]:
            match_score += 1
    return match_score
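

# Worked intuition (illustrative): the score counts overlap between the six most
# and six least frequent letters of the message and ETAOIN's, so it ranges from
# 0 to 12; long English prose typically scores near the top of that range.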
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Depth-first state space tree: each element is first excluded, then included."""
    if index == len(sequence):
        print(current_subsequence)
        return
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()
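

# Trace for sequence = [1, 2] (exclude-first depth-first order): the tree prints
# [], [2], [1], [1, 2] in that order.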
if __name__ == "__main__":
lowerCAmelCase_ = [3, 1, 2, 4]
generate_all_subsequences(seq)
seq.clear()
seq.extend(['A', 'B', 'C'])
generate_all_subsequences(seq)
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('''4.17.0.dev0''')
require_version('''datasets>=1.8.0''', '''To fix: pip install -r examples/pytorch/text-classification/requirements.txt''')
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})
    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
        data_files = {"train": data_args.train_file, "validation": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f'''load a local file for {key}: {data_files[key]}''' )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
UpperCamelCase_ = load_dataset("""csv""" , data_files=a__ , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
UpperCamelCase_ = load_dataset("""json""" , data_files=a__ , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
    label_list = raw_datasets["train"].features["label"].names
    num_labels = len(label_list)
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=model_args.use_fast_tokenizer, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, add_prefix_space=True, )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    label_to_id = {"Refused": 0, "Entailed": 1}
    id_to_label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
    def preprocess_tabfact_function(examples):
        # Tokenize the texts
        def _convert_table_text_to_pandas(_table_text):
            _table_content = [_table_row.split("#") for _table_row in _table_text.strip("\n").split("\n")]
            _table_pd = pd.DataFrame.from_records(_table_content[1:], columns=_table_content[0])
            return _table_pd

        questions = examples["statement"]
        tables = list(map(_convert_table_text_to_pandas, examples["table_text"]))
        result = tokenizer(tables, questions, padding=padding, max_length=max_seq_length, truncation=True)
        result["label"] = examples["label"]
        return result
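
    # For example (illustrative data), the flattened string "col1#col2\na#b\nc#d"
    # becomes a two-column DataFrame with rows ("a", "b") and ("c", "d"), which
    # TapexTokenizer then linearizes together with the accompanying statement.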
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
        raw_datasets = raw_datasets.map(
            preprocess_tabfact_function, batched=True, load_from_cache_file=not data_args.overwrite_cache, desc="Running tokenizer on dataset", )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
        train_dataset = raw_datasets["train"]
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
        eval_dataset = raw_datasets["validation"]
        if data_args.max_eval_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_eval_samples))
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
        predict_dataset = raw_datasets["test"]
        if data_args.max_predict_samples is not None:
            predict_dataset = predict_dataset.select(range(data_args.max_predict_samples))
# Log a few random samples from the training set:
if training_args.do_train:
        for index in random.sample(range(len(train_dataset)), 3):
logger.info(f'''Sample {index} of the training set: {train_dataset[index]}.''' )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return {"accuracy": (preds == p.label_ids).astype(np.float64).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None
# Initialize our Trainer
    trainer = Trainer(
        model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, compute_metrics=compute_metrics, tokenizer=tokenizer, data_collator=data_collator, )
# Training
if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
        predict_dataset = predict_dataset.remove_columns("label")
        predictions = trainer.predict(predict_dataset, metric_key_prefix="predict").predictions
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predict_results_tabfact.txt")
if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                logger.info("***** Predict Results *****")
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
    kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "text-classification"}
    if training_args.push_to_hub:
        trainer.push_to_hub(**kwargs)
    else:
        trainer.create_model_card(**kwargs)
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, return_attention_mask=return_attention_mask, **kwargs, )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2, num_mel_filters=feature_size, min_frequency=0.0, max_frequency=8_000.0, sampling_rate=sampling_rate, norm="slaney", mel_scale="slaney", )
    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        """Compute the log-mel spectrogram of a single waveform."""
        log_spec = spectrogram(
            waveform, window_function(self.n_fft, "hann"), frame_length=self.n_fft, hop_length=self.hop_length, power=2.0, mel_filters=self.mel_filters, log_mel="log10", )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec
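
    # Note on the normalization above (explanatory, not from the original file):
    # the log10 mel spectrogram is clipped to within 8.0 of its maximum (an 80 dB
    # dynamic range) and then linearly mapped into roughly the [-1, 1] interval.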
    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values
    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )
        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]
        batched_speech = BatchFeature({"input_features": raw_speech})
        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech, padding=padding, max_length=max_length if max_length else self.n_samples, truncation=truncation, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask or do_normalize, )
        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"], attention_mask=padded_inputs["attention_mask"], padding_value=self.padding_value, )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)
        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)
        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]
        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features
        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]
        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.upsample.0': 'encoder.upsample.projection',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'layer_norm',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
}
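# MAPPING translates fairseq parameter-name fragments into their HF SEW
# counterparts; the '*' wildcard is filled in later with the encoder layer index.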
def a ( snake_case__: Optional[Any] , snake_case__: str , snake_case__: Tuple , snake_case__: int , snake_case__: List[Any] ):
'''simple docstring'''
for attribute in key.split('''.''' ):
lowercase_ = getattr(snake_case__ , snake_case__ )
if weight_type is not None:
lowercase_ = getattr(snake_case__ , snake_case__ ).shape
else:
lowercase_ = hf_pointer.shape
assert hf_shape == value.shape, (
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
lowercase_ = value
elif weight_type == "weight_g":
lowercase_ = value
elif weight_type == "weight_v":
lowercase_ = value
elif weight_type == "bias":
lowercase_ = value
else:
lowercase_ = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
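# Walks the full fairseq state dict: conv feature-extractor tensors are routed
# through load_conv_layer, everything else is matched against MAPPING and copied
# with set_recursively; any tensor that matches nothing is reported as unused.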
def a ( snake_case__: List[Any] , snake_case__: List[Any] , snake_case__: Dict ):
'''simple docstring'''
lowercase_ = []
lowercase_ = fairseq_model.state_dict()
lowercase_ = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
lowercase_ = False
if "conv_layers" in name:
load_conv_layer(
snake_case__ , snake_case__ , snake_case__ , snake_case__ , hf_model.config.feat_extract_norm == '''group''' , )
lowercase_ = True
else:
for key, mapped_key in MAPPING.items():
lowercase_ = '''sew.''' + mapped_key if (is_finetuned and mapped_key != '''lm_head''') else mapped_key
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
lowercase_ = True
if "*" in mapped_key:
lowercase_ = name.split(snake_case__ )[0].split('''.''' )[-2]
lowercase_ = mapped_key.replace('''*''' , snake_case__ )
if "weight_g" in name:
lowercase_ = '''weight_g'''
elif "weight_v" in name:
lowercase_ = '''weight_v'''
elif "weight" in name:
lowercase_ = '''weight'''
elif "bias" in name:
lowercase_ = '''bias'''
else:
lowercase_ = None
set_recursively(snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ )
continue
if not is_used:
unused_weights.append(snake_case__ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def a ( snake_case__: Union[str, Any] , snake_case__: Optional[Any] , snake_case__: List[str] , snake_case__: Dict , snake_case__: Optional[Any] ):
'''simple docstring'''
lowercase_ = full_name.split('''conv_layers.''' )[-1]
lowercase_ = name.split('''.''' )
lowercase_ = int(items[0] )
lowercase_ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
lowercase_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
lowercase_ = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
lowercase_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
lowercase_ = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case__ )
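# Builds an HF SEWConfig from the fairseq config. fairseq stores the conv
# feature layers as a string literal, hence the eval() below; for fine-tuned
# checkpoints the CTC/SpecAugment fields are taken from the Wav2VecCtc config.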
def a ( snake_case__: Union[str, Any] , snake_case__: Tuple ):
'''simple docstring'''
lowercase_ = SEWConfig()
if is_finetuned:
lowercase_ = model.wav_encoder.wav_model.cfg
else:
lowercase_ = model.cfg
lowercase_ = fs_config.conv_bias
lowercase_ = eval(fs_config.conv_feature_layers )
lowercase_ = [x[0] for x in conv_layers]
lowercase_ = [x[1] for x in conv_layers]
lowercase_ = [x[2] for x in conv_layers]
lowercase_ = '''gelu'''
lowercase_ = '''layer''' if fs_config.extractor_mode == '''layer_norm''' else '''group'''
lowercase_ = 0.0
lowercase_ = fs_config.activation_fn.name
lowercase_ = fs_config.encoder_embed_dim
lowercase_ = 0.0_2
lowercase_ = fs_config.encoder_ffn_embed_dim
lowercase_ = 1e-5
lowercase_ = fs_config.encoder_layerdrop
lowercase_ = fs_config.encoder_attention_heads
lowercase_ = fs_config.conv_pos_groups
lowercase_ = fs_config.conv_pos
lowercase_ = len(snake_case__ )
lowercase_ = fs_config.encoder_layers
lowercase_ = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
lowercase_ = model.cfg
lowercase_ = fs_config.final_dropout
lowercase_ = fs_config.layerdrop
lowercase_ = fs_config.activation_dropout
lowercase_ = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
lowercase_ = fs_config.attention_dropout
lowercase_ = fs_config.dropout_input
lowercase_ = fs_config.dropout
lowercase_ = fs_config.mask_channel_length
lowercase_ = fs_config.mask_channel_prob
lowercase_ = fs_config.mask_length
lowercase_ = fs_config.mask_prob
lowercase_ = '''Wav2Vec2FeatureExtractor'''
lowercase_ = '''Wav2Vec2CTCTokenizer'''
return config
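# Conversion entry point: load the fairseq checkpoint, build or load the
# SEWConfig, create the feature extractor (plus tokenizer/processor when
# fine-tuned), copy the weights, and save everything to the dump folder.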
@torch.no_grad()
def a ( snake_case__: Optional[int] , snake_case__: Optional[Any] , snake_case__: Union[str, Any]=None , snake_case__: Optional[int]=None , snake_case__: Optional[int]=True ):
'''simple docstring'''
if is_finetuned:
lowercase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
else:
lowercase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
if config_path is not None:
lowercase_ = SEWConfig.from_pretrained(snake_case__ )
else:
lowercase_ = convert_config(model[0] , snake_case__ )
lowercase_ = model[0].eval()
lowercase_ = True if config.feat_extract_norm == '''layer''' else False
lowercase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16_000 , padding_value=0 , do_normalize=snake_case__ , return_attention_mask=snake_case__ , )
if is_finetuned:
if dict_path:
lowercase_ = Dictionary.load(snake_case__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
lowercase_ = target_dict.pad_index
lowercase_ = target_dict.bos_index
lowercase_ = target_dict.pad_index
lowercase_ = target_dict.bos_index
lowercase_ = target_dict.eos_index
lowercase_ = len(target_dict.symbols )
lowercase_ = os.path.join(snake_case__ , '''vocab.json''' )
if not os.path.isdir(snake_case__ ):
logger.error('''--pytorch_dump_folder_path ({}) should be a directory'''.format(snake_case__ ) )
return
os.makedirs(snake_case__ , exist_ok=snake_case__ )
with open(snake_case__ , '''w''' , encoding='''utf-8''' ) as vocab_handle:
json.dump(target_dict.indices , snake_case__ )
lowercase_ = WavaVecaCTCTokenizer(
snake_case__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='''|''' , do_lower_case=snake_case__ , )
lowercase_ = WavaVecaProcessor(feature_extractor=snake_case__ , tokenizer=snake_case__ )
processor.save_pretrained(snake_case__ )
lowercase_ = SEWForCTC(snake_case__ )
else:
lowercase_ = SEWModel(snake_case__ )
feature_extractor.save_pretrained(snake_case__ )
recursively_load_weights(snake_case__ , snake_case__ , snake_case__ )
hf_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--is_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
__a = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any:
"""simple docstring"""
lowercase : str = parent
lowercase : Optional[Any] = batch_size
lowercase : Union[str, Any] = seq_length
lowercase : str = is_training
lowercase : str = use_input_lengths
lowercase : List[Any] = use_token_type_ids
lowercase : Union[str, Any] = use_labels
lowercase : Tuple = gelu_activation
lowercase : Dict = sinusoidal_embeddings
lowercase : Any = causal
lowercase : str = asm
lowercase : Optional[Any] = n_langs
lowercase : Dict = vocab_size
lowercase : Dict = n_special
lowercase : List[Any] = hidden_size
lowercase : str = num_hidden_layers
lowercase : int = num_attention_heads
lowercase : str = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : List[Any] = max_position_embeddings
lowercase : Optional[int] = type_sequence_label_size
lowercase : List[str] = initializer_range
lowercase : List[str] = num_labels
lowercase : int = num_choices
lowercase : int = summary_type
lowercase : Tuple = use_proj
lowercase : Union[str, Any] = scope
lowercase : List[str] = bos_token_id
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str = None
if self.use_input_lengths:
lowercase : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase : Union[str, Any] = None
if self.use_token_type_ids:
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase : Union[str, Any] = None
lowercase : List[str] = None
lowercase : Optional[Any] = None
if self.use_labels:
lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float()
lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = XLMModel(config=_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , lengths=_A , langs=_A )
lowercase : Dict = model(_A , langs=_A )
lowercase : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel(_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = XLMForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Any = model(_A , start_positions=_A , end_positions=_A )
lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = XLMForQuestionAnswering(_A )
model.to(_A )
model.eval()
lowercase : Any = model(_A )
lowercase : Tuple = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
lowercase : Optional[int] = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((lowercase) , ) : Optional[int] = result_with_labels.to_tuple()
lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A )
((lowercase) , ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int:
"""simple docstring"""
lowercase : List[str] = XLMForSequenceClassification(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Union[str, Any] = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = self.num_labels
lowercase : Tuple = XLMForTokenClassification(_A )
model.to(_A )
model.eval()
lowercase : str = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : int = self.num_choices
lowercase : List[Any] = XLMForMultipleChoice(config=_A )
model.to(_A )
model.eval()
lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = self.prepare_config_and_inputs()
((lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase) , (lowercase)) : Union[str, Any] = config_and_inputs
lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_UpperCamelCase : Tuple = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
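# _prepare_for_class below adds the extra zero-valued start/end position labels
# that XLMForQuestionAnswering expects when labels are requested.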
def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowercase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
lowercase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def __a ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = XLMModelTester(self )
lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 )
def __a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_A )
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_A )
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_A )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_A )
def __a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_A )
def __a ( self : Dict ) -> int:
"""simple docstring"""
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_A )
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )
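# The two generation-test hooks below recompute the expected attention and
# hidden-state shapes per decoding step (per the in-code note, XLM appends a
# PAD dummy token each step, hence the min_length + idx + 1 lengths).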
def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_A ):
# adds PAD dummy token
lowercase : List[Any] = min_length + idx + 1
lowercase : str = min_length + idx + 1
lowercase : Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) )
def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_A ):
# adds PAD dummy token
lowercase : Union[str, Any] = min_length + idx + 1
lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , )
pass
@slow
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Any = XLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class _A ( unittest.TestCase ):
@slow
def __a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(_A )
lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president
lowercase : List[str] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowercase : Dict = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
A: Dict = logging.get_logger(__name__)
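# Renames original SegFormer checkpoint keys to the HF naming scheme, e.g.
# patch_embed1 -> patch_embeddings.0, block1 -> block.0,
# attn.q -> attention.self.query.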
def _snake_case ( UpperCamelCase : Union[str, Any] , UpperCamelCase : str=False ):
UpperCAmelCase : Dict = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith("""head""" ):
UpperCAmelCase : Any = '''segformer.encoder.''' + key
if key.startswith("""backbone""" ):
UpperCAmelCase : List[Any] = key.replace("""backbone""" , """segformer.encoder""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
UpperCAmelCase : Optional[int] = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
UpperCAmelCase : Tuple = key.replace(F"patch_embed{idx}" , F"patch_embeddings.{int(UpperCamelCase )-1}" )
if "norm" in key:
UpperCAmelCase : Tuple = key.replace("""norm""" , """layer_norm""" )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
UpperCAmelCase : Dict = key[key.find("""segformer.encoder.layer_norm""" ) + len("""segformer.encoder.layer_norm""" )]
UpperCAmelCase : Dict = key.replace(F"layer_norm{idx}" , F"layer_norm.{int(UpperCamelCase )-1}" )
if "layer_norm1" in key:
UpperCAmelCase : Dict = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
UpperCAmelCase : List[str] = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
UpperCAmelCase : List[Any] = key[key.find("""block""" ) + len("""block""" )]
UpperCAmelCase : Optional[int] = key.replace(F"block{idx}" , F"block.{int(UpperCamelCase )-1}" )
if "attn.q" in key:
UpperCAmelCase : List[Any] = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
UpperCAmelCase : List[str] = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
UpperCAmelCase : Union[str, Any] = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
UpperCAmelCase : Union[str, Any] = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
UpperCAmelCase : str = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
UpperCAmelCase : List[Any] = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
UpperCAmelCase : str = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
UpperCAmelCase : List[str] = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
UpperCAmelCase : Any = key[key.find("""linear_c""" ) + len("""linear_c""" )]
UpperCAmelCase : List[str] = key.replace(F"linear_c{idx}" , F"linear_c.{int(UpperCamelCase )-1}" )
if key.startswith("""head""" ):
UpperCAmelCase : List[Any] = key.replace("""head""" , """classifier""" )
UpperCAmelCase : int = value
return new_state_dict
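# The original checkpoints fuse the key and value projections into a single
# `kv` matrix per attention block; split it into the separate key/value weights
# and biases that the HF implementation expects.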
def _snake_case ( UpperCamelCase : Any , UpperCamelCase : Dict ):
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
UpperCAmelCase : int = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.weight" )
UpperCAmelCase : Optional[int] = state_dict.pop(F"segformer.encoder.block.{i}.{j}.attention.self.kv.bias" )
# next, add keys and values (in that order) to the state dict
UpperCAmelCase : Optional[int] = kv_weight[
: config.hidden_sizes[i], :
]
UpperCAmelCase : str = kv_bias[: config.hidden_sizes[i]]
UpperCAmelCase : List[Any] = kv_weight[
config.hidden_sizes[i] :, :
]
UpperCAmelCase : Optional[Any] = kv_bias[
config.hidden_sizes[i] :
]
def _snake_case ( ):
UpperCAmelCase : List[Any] = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
UpperCAmelCase : List[str] = Image.open(requests.get(UpperCamelCase , stream=UpperCamelCase ).raw )
return image
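# Full conversion: derive the config from the model name, push the renamed and
# kv-split state dict into the HF model, then compare a 3x3x3 logits slice
# against values recorded from the original checkpoints.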
@torch.no_grad()
def _snake_case ( UpperCamelCase : Union[str, Any] , UpperCamelCase : Optional[Any] , UpperCamelCase : Any ):
UpperCAmelCase : List[str] = SegformerConfig()
UpperCAmelCase : int = False
# set attributes based on model_name
UpperCAmelCase : int = '''huggingface/label-files'''
if "segformer" in model_name:
UpperCAmelCase : Tuple = model_name[len("""segformer.""" ) : len("""segformer.""" ) + 2]
if "ade" in model_name:
UpperCAmelCase : Union[str, Any] = 150
UpperCAmelCase : Union[str, Any] = '''ade20k-id2label.json'''
UpperCAmelCase : int = (1, 150, 128, 128)
elif "city" in model_name:
UpperCAmelCase : Any = 19
UpperCAmelCase : List[str] = '''cityscapes-id2label.json'''
UpperCAmelCase : Optional[Any] = (1, 19, 128, 128)
else:
raise ValueError(F"Model {model_name} not supported" )
elif "mit" in model_name:
UpperCAmelCase : List[str] = True
UpperCAmelCase : Union[str, Any] = model_name[4:6]
UpperCAmelCase : Optional[Any] = 1000
UpperCAmelCase : str = '''imagenet-1k-id2label.json'''
UpperCAmelCase : List[Any] = (1, 1000)
else:
raise ValueError(F"Model {model_name} not supported" )
# set config attributes
UpperCAmelCase : List[str] = json.load(open(hf_hub_download(UpperCamelCase , UpperCamelCase , repo_type="""dataset""" ) , """r""" ) )
UpperCAmelCase : Any = {int(k ): v for k, v in idalabel.items()}
UpperCAmelCase : Any = idalabel
UpperCAmelCase : int = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
UpperCAmelCase : List[Any] = [64, 128, 320, 512]
UpperCAmelCase : Optional[int] = 256
elif size == "b2":
UpperCAmelCase : List[Any] = [64, 128, 320, 512]
UpperCAmelCase : Union[str, Any] = 768
UpperCAmelCase : int = [3, 4, 6, 3]
elif size == "b3":
UpperCAmelCase : List[Any] = [64, 128, 320, 512]
UpperCAmelCase : List[Any] = 768
UpperCAmelCase : List[str] = [3, 4, 18, 3]
elif size == "b4":
UpperCAmelCase : int = [64, 128, 320, 512]
UpperCAmelCase : Optional[int] = 768
UpperCAmelCase : int = [3, 8, 27, 3]
elif size == "b5":
UpperCAmelCase : Optional[int] = [64, 128, 320, 512]
UpperCAmelCase : Optional[int] = 768
UpperCAmelCase : int = [3, 6, 40, 3]
else:
raise ValueError(F"Size {size} not supported" )
# load image processor (only resize + normalize)
UpperCAmelCase : List[Any] = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=UpperCamelCase , align=UpperCamelCase , do_random_crop=UpperCamelCase )
# prepare image
UpperCAmelCase : Optional[Any] = prepare_img()
UpperCAmelCase : List[str] = image_processor(images=UpperCamelCase , return_tensors="""pt""" ).pixel_values
logger.info(F"Converting model {model_name}..." )
# load original state dict
if encoder_only:
UpperCAmelCase : Dict = torch.load(UpperCamelCase , map_location=torch.device("""cpu""" ) )
else:
UpperCAmelCase : Optional[Any] = torch.load(UpperCamelCase , map_location=torch.device("""cpu""" ) )['''state_dict''']
# rename keys
UpperCAmelCase : Dict = rename_keys(UpperCamelCase , encoder_only=UpperCamelCase )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(UpperCamelCase , UpperCamelCase )
# create HuggingFace model and load state dict
if encoder_only:
UpperCAmelCase : str = False
UpperCAmelCase : Union[str, Any] = SegformerForImageClassification(UpperCamelCase )
else:
UpperCAmelCase : List[Any] = SegformerForSemanticSegmentation(UpperCamelCase )
model.load_state_dict(UpperCamelCase )
model.eval()
# forward pass
UpperCAmelCase : Any = model(UpperCamelCase )
UpperCAmelCase : Optional[int] = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
UpperCAmelCase : Optional[int] = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
UpperCAmelCase : Dict = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
UpperCAmelCase : int = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
UpperCAmelCase : Union[str, Any] = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
UpperCAmelCase : List[Any] = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
UpperCAmelCase : Optional[Any] = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
UpperCAmelCase : Optional[Any] = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
UpperCAmelCase : Dict = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
UpperCAmelCase : List[str] = torch.tensor(
[
[
[-1.1_3_7_2e0_1, -1.2_7_8_7e0_1, -1.3_4_7_7e0_1],
[-1.2_5_3_6e0_1, -1.4_1_9_4e0_1, -1.4_4_0_9e0_1],
[-1.3_2_1_7e0_1, -1.4_8_8_8e0_1, -1.5_3_2_7e0_1],
],
[
[-1.4_7_9_1e0_1, -1.7_1_2_2e0_1, -1.8_2_7_7e0_1],
[-1.7_1_6_3e0_1, -1.9_1_9_2e0_1, -1.9_5_3_3e0_1],
[-1.7_8_9_7e0_1, -1.9_9_9_1e0_1, -2.0_3_1_5e0_1],
],
[
[7.6_7_2_3e-0_1, 4.1_9_2_1e-0_1, -7.7_8_7_8e-0_2],
[4.7_7_7_2e-0_1, 9.5_5_5_7e-0_3, -2.8_0_8_2e-0_1],
[3.6_0_3_2e-0_1, -2.4_8_2_6e-0_1, -5.1_1_6_8e-0_1],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
UpperCAmelCase : Any = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
UpperCAmelCase : List[str] = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
UpperCAmelCase : Optional[int] = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
UpperCAmelCase : List[Any] = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
UpperCAmelCase : List[Any] = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
UpperCAmelCase : Dict = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
UpperCAmelCase : Union[str, Any] = logits.argmax(-1 ).item()
print("""Predicted class:""" , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , UpperCamelCase , atol=1e-2 )
# finally, save model and image processor
logger.info(F"Saving PyTorch model and image processor to {pytorch_dump_folder_path}..." )
Path(UpperCamelCase ).mkdir(exist_ok=UpperCamelCase )
model.save_pretrained(UpperCamelCase )
image_processor.save_pretrained(UpperCamelCase )
if __name__ == "__main__":
A: Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="segformer.b0.512x512.ade.160k",
type=str,
help="Name of the model you\'d like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
A: Dict = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
def snake_case( __magic_name__ = 50 ) -> int:
'''simple docstring'''
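# Dynamic programming over row lengths (this is the Project Euler 117 setup:
# unit blanks plus tiles of length 2-4): ways_number[n] is seeded with 1 for the
# all-blank row, and placing the leftmost tile at each start position adds
# ways_number[n - start - length] fillings for the remainder of the row.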
lowercase : Union[str, Any] = [1] * (length + 1)
for row_length in range(length + 1 ):
for tile_length in range(2 , 5 ):
for tile_start in range(row_length - tile_length + 1 ):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'''{solution() = }''')
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
UpperCAmelCase_ = {
'configuration_chinese_clip': [
'CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ChineseCLIPConfig',
'ChineseCLIPOnnxConfig',
'ChineseCLIPTextConfig',
'ChineseCLIPVisionConfig',
],
'processing_chinese_clip': ['ChineseCLIPProcessor'],
}
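# Standard transformers lazy-import pattern: names are registered in an import
# structure and only materialized on first attribute access via _LazyModule;
# the vision and torch entries below are appended only when those backends are
# available.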
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['ChineseCLIPFeatureExtractor']
UpperCAmelCase_ = ['ChineseCLIPImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ChineseCLIPModel',
'ChineseCLIPPreTrainedModel',
'ChineseCLIPTextModel',
'ChineseCLIPVisionModel',
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
import os
def snake_case( __magic_name__ = "input.txt" ) -> int:
'''simple docstring'''
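# Column-by-column DP with three movement directions (right, up, down), as in
# Project Euler problem 82: seed the first column from the matrix, take the
# from-the-left cost for each later column, relax it with a downward and an
# upward pass, and return the cheapest value in the last column.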
with open(os.path.join(os.path.dirname(__magic_name__ ) , __magic_name__ ) ) as input_file:
lowercase : Any = [
[int(__magic_name__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase : List[Any] = len(__magic_name__ )
lowercase : Any = len(matrix[0] )
lowercase : Tuple = [[-1 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
for i in range(__magic_name__ ):
lowercase : str = matrix[i][0]
for j in range(1 , __magic_name__ ):
for i in range(__magic_name__ ):
lowercase : Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __magic_name__ ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f'''{solution() = }''')
from math import ceil
def __snake_case ( _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] ) -> List[str]:
A_ : List[str] = list(range(0 , _lowerCAmelCase ) )
A_ : Any = [item for sublist in list(device_map.values() ) for item in sublist]
# Duplicate check
A_ : int = []
for i in device_map_blocks:
if device_map_blocks.count(_lowerCAmelCase ) > 1 and i not in duplicate_blocks:
duplicate_blocks.append(_lowerCAmelCase )
# Missing blocks
A_ : Optional[int] = [i for i in blocks if i not in device_map_blocks]
A_ : List[Any] = [i for i in device_map_blocks if i not in blocks]
if len(_lowerCAmelCase ) != 0:
raise ValueError(
"Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."
" These attention blocks were specified more than once: " + str(_lowerCAmelCase ) )
if len(_lowerCAmelCase ) != 0:
raise ValueError(
"There are attention blocks for this model that are not specified in the device_map. Add these attention "
"blocks to a device on the device_map: " + str(_lowerCAmelCase ) )
if len(_lowerCAmelCase ) != 0:
raise ValueError(
"The device_map contains more attention blocks than this model has. Remove these from the device_map:"
+ str(_lowerCAmelCase ) )
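# Splits n_layers into contiguous, near-equal chunks (ceil(n_layers / n_devices)
# layers per device) and returns a {device: [layer indices]} mapping.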
def __snake_case ( _lowerCAmelCase : int , _lowerCAmelCase : List[str] ) -> List[Any]:
A_ : str = list(range(_lowerCAmelCase ) )
A_ : int = int(ceil(n_layers / len(_lowerCAmelCase ) ) )
A_ : Union[str, Any] = [layers[i : i + n_blocks] for i in range(0 , _lowerCAmelCase , _lowerCAmelCase )]
return dict(zip(_lowerCAmelCase , _lowerCAmelCase ) )
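# Minimal usage sketch (hypothetical device ids; note both helpers above share
# the obfuscated name `__snake_case`, so the calls below are illustrative only):
# device_map = get_device_map(24, [0, 1])
# -> {0: [0, 1, ..., 11], 1: [12, 13, ..., 23]}
# assert_device_map(device_map, 24)  # raises ValueError on duplicates or gaps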
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
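# Integration check: the negative mean per-token loss of mt5-small on a fixed
# input/label pair must match a recorded reference value to within 2e-4.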
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
lowercase : int = AutoTokenizer.from_pretrained('''google/mt5-small''' )
lowercase : Optional[Any] = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
lowercase : Dict = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
lowercase : List[Any] = model(_A , labels=_A ).loss
lowercase : Dict = -tf.math.reduce_mean(_A ).numpy()
lowercase : Union[str, Any] = -21.228_168
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__A = {
"configuration_convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertOnnxConfig"],
"tokenization_convbert": ["ConvBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = ["ConvBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvBertForMaskedLM",
"ConvBertForMultipleChoice",
"ConvBertForQuestionAnswering",
"ConvBertForSequenceClassification",
"ConvBertForTokenClassification",
"ConvBertLayer",
"ConvBertModel",
"ConvBertPreTrainedModel",
"load_tf_weights_in_convbert",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A = [
"TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFConvBertForMaskedLM",
"TFConvBertForMultipleChoice",
"TFConvBertForQuestionAnswering",
"TFConvBertForSequenceClassification",
"TFConvBertForTokenClassification",
"TFConvBertLayer",
"TFConvBertModel",
"TFConvBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
__A = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from heapq import heappop, heappush
import numpy as np
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , ) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
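# Dijkstra / uniform-cost search on a grid: cells equal to 1 are walkable and
# every step costs 1 (diagonal moves optional via allow_diagonal); `matrix`
# stores the best known distance per cell and `predecessors` lets the path be
# reconstructed once the destination is popped from the heap.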
lowercase , lowercase : Optional[int] = grid.shape
lowercase : Optional[int] = [-1, 1, 0, 0]
lowercase : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
lowercase , lowercase : Union[str, Any] = [(0, source)], set()
lowercase : List[str] = np.full((rows, cols) , np.inf )
lowercase : Dict = 0
lowercase : Dict = np.empty((rows, cols) , dtype=__magic_name__ )
lowercase : Any = None
while queue:
((lowercase) , (lowercase)) : Optional[Any] = heappop(__magic_name__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
lowercase : Tuple = []
while (x, y) != source:
path.append((x, y) )
lowercase , lowercase : Optional[int] = predecessors[x, y]
path.append(__magic_name__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(__magic_name__ ) ):
lowercase , lowercase : Optional[int] = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
lowercase : List[Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(__magic_name__ , (dist + 1, (nx, ny)) )
lowercase : int = dist + 1
lowercase : Optional[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
from math import factorial
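# Digit sum of n! (Project Euler problem 20 uses n = 100).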
def _snake_case ( lowercase__ = 100 ):
    return sum(int(x ) for x in str(factorial(lowercase__ ) ) )
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip())))
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCAmelCase_ = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
"""simple docstring"""
SCREAMING_SNAKE_CASE = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _SCREAMING_SNAKE_CASE ( ) -> None:
A__ = input("Enter message: " )
A__ = input("Enter key [alphanumeric]: " )
A__ = input("Encrypt/Decrypt [e/d]: " )
if mode.lower().startswith("e" ):
A__ = '''encrypt'''
A__ = encrypt_message(lowercase_ , lowercase_ )
elif mode.lower().startswith("d" ):
A__ = '''decrypt'''
A__ = decrypt_message(lowercase_ , lowercase_ )
print(f"""\n{mode.title()}ed message:""" )
print(lowercase_ )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
return translate_message(lowercase_ , lowercase_ , "encrypt" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> str:
return translate_message(lowercase_ , lowercase_ , "decrypt" )
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ ) -> str:
A__ = []
A__ = 0
A__ = key.upper()
for symbol in message:
A__ = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(lowercase_ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(lowercase_ ):
A__ = 0
else:
translated.append(lowercase_ )
return "".join(lowercase_ )
if __name__ == "__main__":
main()
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : List[Any] = abs(__magic_name__ )
lowercase : Optional[Any] = 0
while n > 0:
res += n % 10
n //= 10
return res
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Optional[int] = abs(__magic_name__ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
    return sum(int(c ) for c in str(abs(__magic_name__ ) ) )
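# Benchmarks the three digit-sum variants above (iterative, recursive, and the
# str-based one-liner) with timeit on progressively larger inputs.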
def snake_case( ) -> None:
'''simple docstring'''
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__magic_name__ , __magic_name__ ) -> None:
lowercase : str = F"""{func.__name__}({value})"""
lowercase : Any = timeit(F"""__main__.{call}""" , setup='''import __main__''' )
print(F"""{call:56} = {func(__magic_name__ )} -- {timing:.4f} seconds""" )
for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(__magic_name__ , __magic_name__ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_a = {"""configuration_vit_mae""": ["""VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMAEConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"""VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMAEForPreTraining""",
"""ViTMAELayer""",
"""ViTMAEModel""",
"""ViTMAEPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a = [
"""TFViTMAEForPreTraining""",
"""TFViTMAEModel""",
"""TFViTMAEPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
_a = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
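# Each `accelerate` subcommand registers its own argparse subparser; the chosen
# subparser attaches a `func` attribute that main() dispatches at the end.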
def main() -> None:
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)
if __name__ == "__main__":
main()
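
# Illustrative shell invocations of the CLI defined above (the script name in
# the launch example is a placeholder):
#
#     accelerate config        # interactive setup, registered by get_config_parser
#     accelerate env           # print environment info
#     accelerate launch train.py --num_processes 2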
| 308
| 0
|
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        # forwarded to the tokenizer's batch_decode
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # forwarded to the tokenizer's decode
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
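
# Minimal usage sketch for the processor above (the checkpoint name and image
# path are illustrative, not part of the original file):
#
#     from PIL import Image
#     from transformers import CLIPProcessor
#
#     processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#     inputs = processor(text=["a photo of a cat"], images=Image.open("cat.png"), return_tensors="pt")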
| 1
|
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]


def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def remove_projection_head(state_dict):
    # the projection head is used during pre-training, but not for downstream evaluation
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
lowercase : List[str] = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]] )
elif "b16" in checkpoint_url:
lowercase : Any = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]] )
elif "l16" in checkpoint_url:
lowercase : Dict = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]] )
elif "b4" in checkpoint_url:
lowercase : Tuple = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]] )
else:
lowercase : Optional[int] = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]] )
# verify logits
assert torch.allclose(last_hidden_state[:, 0, :3] , __magic_name__ , atol=1e-4 )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(__magic_name__ )
print(F"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(__magic_name__ )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
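
# Example invocation of the conversion script above (the dump folder is a
# placeholder):
#
#     python convert_vit_msn_to_pytorch.py \
#         --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#         --pytorch_dump_folder_path ./vit-msn-small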
| 308
| 0
|
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
    DDIMScheduler,
    KandinskyV22InpaintPipeline,
    KandinskyV22PriorPipeline,
    UNet2DConditionModel,
    VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyV22InpaintPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyV22InpaintPipeline
    params = ["image_embeds", "negative_image_embeds", "image", "mask_image"]
    batch_params = [
        "image_embeds",
        "negative_image_embeds",
        "image",
        "mask_image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0)

        model_kwargs = {
            "in_channels": 9,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }

        model = UNet2DConditionModel(**model_kwargs)
        return model
    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model

    def get_dummy_components(self):
        unet = self.dummy_unet
        movq = self.dummy_movq

        scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_schedule="linear",
            beta_start=0.00085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type="epsilon",
            thresholding=False,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }

        return components

    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1)).to(
            device
        )
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))
        # create mask
        mask = np.ones((64, 64), dtype=np.float32)
        mask[:32, :32] = 0

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": init_image,
            "mask_image": mask,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 2,
            "guidance_scale": 4.0,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_inpaint(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)

        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(
            **self.get_dummy_inputs(device),
            return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        print(f"image.shape {image.shape}")

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.50775903, 0.49527195, 0.48824543, 0.50192237, 0.48644906, 0.49373814, 0.4780598, 0.47234827, 0.48327848]
        )

        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class KandinskyV22InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_inpaint(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinskyv22/kandinskyv22_inpaint_cat_with_hat_fp16.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        mask = np.ones((768, 768), dtype=np.float32)
        mask[:250, 250:-250] = 0

        prompt = "a hat"

        pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyV22InpaintPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt,
            generator=generator,
            num_inference_steps=5,
            negative_prompt="",
        ).to_tuple()

        output = pipeline(
            image=init_image,
            mask_image=mask,
            image_embeds=image_emb,
            negative_image_embeds=zero_image_emb,
            generator=generator,
            num_inference_steps=100,
            height=768,
            width=768,
            output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
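
# Manual usage sketch of the pipeline exercised by the tests above
# (illustrative only; it downloads several GB of weights and needs a CUDA GPU):
#
#     pipe = KandinskyV22InpaintPipeline.from_pretrained(
#         "kandinsky-community/kandinsky-2-2-decoder-inpaint", torch_dtype=torch.float16
#     ).to("cuda")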
| 227
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    return price * (1 + tax_rate)
if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| 308
| 0
|
def excel_title_to_column(column_title: str) -> int:
    """Given an Excel-style column title (e.g. "AB"), return its column number."""
    assert column_title.isupper()
    answer = 0
    index = len(column_title) - 1
    power = 0

    while index >= 0:
        value = (ord(column_title[index]) - 64) * pow(26, power)
        answer += value
        power += 1
        index -= 1

    return answer
if __name__ == "__main__":
from doctest import testmod
testmod()
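

# Hand-checked examples for the conversion above (added for illustration, not
# part of the original module): "A" -> 1, "Z" -> 26, "AA" -> 1 * 26 + 1 = 27,
# "AB" -> 1 * 26 + 2 = 28.
def _excel_title_sanity_check() -> None:
    assert excel_title_to_column("A") == 1
    assert excel_title_to_column("Z") == 26
    assert excel_title_to_column("AA") == 27
    assert excel_title_to_column("AB") == 28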
| 122
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    valid_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader


def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))

        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))

    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()

# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)

# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)

# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)

# Load dataset and dataloader
eval_dataloader = create_dataloader(args)

# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)

# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
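
# Example launch of the evaluation script above (the checkpoint and dataset
# names are illustrative):
#
#     accelerate launch validation_loss.py \
#         --model_ckpt codeparrot/codeparrot \
#         --dataset_name codeparrot/codeparrot-clean-valid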
| 308
| 0
|
values = {
    0: "0",
    1: "1",
    2: "2",
    3: "3",
    4: "4",
    5: "5",
    6: "6",
    7: "7",
    8: "8",
    9: "9",
    10: "a",
    11: "b",
    12: "c",
    13: "d",
    14: "e",
    15: "f",
}
def decimal_to_hexadecimal(decimal: float) -> str:
    """Take an integer decimal value, return its hexadecimal representation as a str."""
    assert type(decimal) in (int, float) and decimal == int(decimal)
    decimal = int(decimal)
    hexadecimal = ""
    negative = False
    if decimal < 0:
        negative = True
        decimal *= -1
    while decimal > 0:
        decimal, remainder = divmod(decimal, 16)
        hexadecimal = values[remainder] + hexadecimal
    hexadecimal = "0x" + hexadecimal
    if negative:
        hexadecimal = "-" + hexadecimal
    return hexadecimal
if __name__ == "__main__":
import doctest
doctest.testmod()
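

# Hand-checked examples for the converter above (added for illustration, not
# part of the original module): 5 -> "0x5", 26 -> "0x1a", -256 -> "-0x100".
def _hexadecimal_sanity_check() -> None:
    assert decimal_to_hexadecimal(5) == "0x5"
    assert decimal_to_hexadecimal(26) == "0x1a"
    assert decimal_to_hexadecimal(-256) == "-0x100"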
| 30
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lz4, require_zstandard
def test_mockfs(mockfs):
    assert "mock" in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_non_mockfs():
    assert "mock" not in _fsspec_registry
    assert "bz2" in _fsspec_registry


def test_extract_path_from_uri():
    mock_bucket = "mock-s3-bucket"
    dataset_path = f"s3://{mock_bucket}"
    dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path.startswith("s3://") is False

    dataset_path = "./local/path"
    new_dataset_path = extract_path_from_uri(dataset_path)
    assert dataset_path == new_dataset_path


def test_is_remote_filesystem(mockfs):
    is_remote = is_remote_filesystem(mockfs)
    assert is_remote is True

    fs = fsspec.filesystem("file")
    is_remote = is_remote_filesystem(fs)
    assert is_remote is False
@pytest.mark.parametrize("compression_fs_class", COMPRESSION_FILESYSTEMS)
def test_compression_filesystems(compression_fs_class, gz_file, bz2_file, lz4_file, zstd_file, xz_file, text_file):
    input_paths = {"gzip": gz_file, "xz": xz_file, "zstd": zstd_file, "bz2": bz2_file, "lz4": lz4_file}
    input_path = input_paths[compression_fs_class.protocol]
    if input_path is None:
        reason = f"for '{compression_fs_class.protocol}' compression protocol, "
        if compression_fs_class.protocol == "lz4":
            reason += require_lz4.kwargs["reason"]
        elif compression_fs_class.protocol == "zstd":
            reason += require_zstandard.kwargs["reason"]
        pytest.skip(reason)

    fs = fsspec.filesystem(compression_fs_class.protocol, fo=input_path)
    assert isinstance(fs, compression_fs_class)
    expected_filename = os.path.basename(input_path)
    expected_filename = expected_filename[: expected_filename.rindex(".")]
    assert fs.glob("*") == [expected_filename]
    with fs.open(expected_filename, "r", encoding="utf-8") as f, open(text_file, encoding="utf-8") as expected_file:
        assert f.read() == expected_file.read()
@pytest.mark.parametrize("protocol", ["zip", "gzip"])
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(path)
    assert not fs.isfile("non_existing_" + member_file_path)


@pytest.mark.integration
def test_hf_filesystem(hf_token, hf_api, hf_private_dataset_repo_txt_data, text_file):
    repo_info = hf_api.dataset_info(hf_private_dataset_repo_txt_data, token=hf_token)
    hffs = HfFileSystem(repo_info=repo_info, token=hf_token)
    assert sorted(hffs.glob("*")) == [".gitattributes", "data"]
    assert hffs.isdir("data")
    assert hffs.isfile(".gitattributes") and hffs.isfile("data/text_data.txt")
    with open(text_file) as f:
        assert hffs.open("data/text_data.txt", "r").read() == f.read()


def test_fs_overwrites():
    protocol = "bz2"

    # Import module
    import datasets.filesystems

    # Overwrite protocol and reload
    register_implementation(protocol, None, clobber=True)
    with pytest.warns(UserWarning) as warning_info:
        importlib.reload(datasets.filesystems)

    assert len(warning_info) == 1
    assert (
        str(warning_info[0].message)
        == f"A filesystem protocol was already set for {protocol} and will be overwritten."
    )
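

# Illustrative use of the compression-filesystem behavior tested above (the
# archive path is a placeholder; the inner member name drops the .gz suffix):
#
#     fs = fsspec.filesystem("gzip", fo="dataset.jsonl.gz")
#     with fs.open("dataset.jsonl", "r", encoding="utf-8") as f:
#         first_line = f.readline()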
| 308
| 0
|
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    name_a = a.name
    name_b = b.name

    a.name = ""
    b.name = ""

    res = a == b

    a.name = name_a
    b.name = name_b

    return res


def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)

    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)


def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)


def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i

        name_i = inits[i].name
        name_ref = inits[ref_i].name

        model_without_ext.graph.initializer.remove(inits[i])

        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)


def remove_dup_initializers(onnx_file_path):
    """
    Removes duplicate initializers from the ONNX model at `onnx_file_path` and
    saves an `optimized_` copy next to it.
    """
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)

    model = onnx.load(os.path.join(model_file_folder, model_file_name))

    inits = list(model.graph.initializer)

    dup_set = set()
    dup_map = {}
    ind_to_replace = []

    total_reduced_size = 0

    for i in range(len(inits)):
        if i in dup_set:
            continue

        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)

                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size

                name_i = inits[i].name
                name_j = inits[j].name

                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]

                ind_to_replace.append((j, i))

    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")

    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)

    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)

    return new_model
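

# Example usage of the deduplication helper above (the file path is a
# placeholder): for an input "model.onnx" it writes "optimized_model.onnx"
# next to the original and returns that path.
#
#     optimized_path = remove_dup_initializers("model.onnx")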
| 109
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """
        Checks whether there might be something wrong with given input with regard to the model.
        """
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True

        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def _parse_and_tokenize(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
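

# Minimal usage sketch of the pipelines above (the checkpoint name is
# illustrative):
#
#     from transformers import pipeline
#
#     summarizer = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
#     print(summarizer("A long article ...", max_length=56)[0]["summary_text"])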
| 308
| 0
|
import os
import re
import sys
import traceback
import warnings
from pathlib import Path
from typing import Dict, Optional, Union
from uuid import uuid4
from huggingface_hub import HfFolder, ModelCard, ModelCardData, hf_hub_download, whoami
from huggingface_hub.file_download import REGEX_COMMIT_HASH
from huggingface_hub.utils import (
EntryNotFoundError,
RepositoryNotFoundError,
RevisionNotFoundError,
is_jinja_available,
)
from packaging import version
from requests import HTTPError
from .. import __version__
from .constants import (
DEPRECATED_REVISION_ARGS,
DIFFUSERS_CACHE,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
SAFETENSORS_WEIGHTS_NAME,
WEIGHTS_NAME,
)
from .import_utils import (
ENV_VARS_TRUE_VALUES,
_flax_version,
_jax_version,
_onnxruntime_version,
_torch_version,
is_flax_available,
is_onnx_available,
is_torch_available,
)
from .logging import get_logger
logger = get_logger(__name__)

MODEL_CARD_TEMPLATE_PATH = Path(__file__).parent / "model_card_template.md"
SESSION_ID = uuid4().hex
HF_HUB_OFFLINE = os.getenv("HF_HUB_OFFLINE", "").upper() in ENV_VARS_TRUE_VALUES
DISABLE_TELEMETRY = os.getenv("DISABLE_TELEMETRY", "").upper() in ENV_VARS_TRUE_VALUES
HUGGINGFACE_CO_TELEMETRY = HUGGINGFACE_CO_RESOLVE_ENDPOINT + "/api/telemetry/"
def http_user_agent(user_agent: Union[Dict, str, None] = None) -> str:
    """
    Formats a user-agent string with basic info about a request.
    """
    ua = f"diffusers/{__version__}; python/{sys.version.split()[0]}; session_id/{SESSION_ID}"
    if DISABLE_TELEMETRY or HF_HUB_OFFLINE:
        return ua + "; telemetry/off"
    if is_torch_available():
        ua += f"; torch/{_torch_version}"
    if is_flax_available():
        ua += f"; jax/{_jax_version}"
        ua += f"; flax/{_flax_version}"
    if is_onnx_available():
        ua += f"; onnxruntime/{_onnxruntime_version}"
    # CI will set this value to True
    if os.environ.get("DIFFUSERS_IS_CI", "").upper() in ENV_VARS_TRUE_VALUES:
        ua += "; is_ci/true"
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join(f"{k}/{v}" for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    return ua
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
    if token is None:
        token = HfFolder.get_token()
    if organization is None:
        username = whoami(token)["name"]
        return f"{username}/{model_id}"
    else:
        return f"{organization}/{model_id}"
def create_model_card(args, model_name):
    if not is_jinja_available():
        raise ValueError(
            "Modelcard rendering is based on Jinja templates."
            " Please make sure to have `jinja` installed before using `create_model_card`."
            " To install it, please run `pip install Jinja2`."
        )

    if hasattr(args, "local_rank") and args.local_rank not in [-1, 0]:
        return

    hub_token = args.hub_token if hasattr(args, "hub_token") else None
    repo_name = get_full_repo_name(model_name, token=hub_token)

    model_card = ModelCard.from_template(
        card_data=ModelCardData(  # Card metadata object that will be converted to YAML block
            language="en", license="apache-2.0", library_name="diffusers", tags=[], datasets=args.dataset_name, metrics=[]
        ),
        template_path=MODEL_CARD_TEMPLATE_PATH, model_name=model_name, repo_name=repo_name,
        dataset_name=args.dataset_name if hasattr(args, "dataset_name") else None,
        learning_rate=args.learning_rate, train_batch_size=args.train_batch_size, eval_batch_size=args.eval_batch_size,
        gradient_accumulation_steps=(args.gradient_accumulation_steps if hasattr(args, "gradient_accumulation_steps") else None),
        adam_beta1=args.adam_beta1 if hasattr(args, "adam_beta1") else None,
        adam_beta2=args.adam_beta2 if hasattr(args, "adam_beta2") else None,
        adam_weight_decay=args.adam_weight_decay if hasattr(args, "adam_weight_decay") else None,
        adam_epsilon=args.adam_epsilon if hasattr(args, "adam_epsilon") else None,
        lr_scheduler=args.lr_scheduler if hasattr(args, "lr_scheduler") else None,
        lr_warmup_steps=args.lr_warmup_steps if hasattr(args, "lr_warmup_steps") else None,
        ema_inv_gamma=args.ema_inv_gamma if hasattr(args, "ema_inv_gamma") else None,
        ema_power=args.ema_power if hasattr(args, "ema_power") else None,
        ema_max_decay=args.ema_max_decay if hasattr(args, "ema_max_decay") else None,
        mixed_precision=args.mixed_precision,
    )

    card_path = os.path.join(args.output_dir, "README.md")
    model_card.save(card_path)
def extract_commit_hash(resolved_file: Optional[str], commit_hash: Optional[str] = None):
    if resolved_file is None or commit_hash is not None:
        return commit_hash
    resolved_file = str(Path(resolved_file).as_posix())
    search = re.search(r"snapshots/([^/]+)/", resolved_file)
    if search is None:
        return None
    commit_hash = search.groups()[0]
    return commit_hash if REGEX_COMMIT_HASH.match(commit_hash) else None
# Old default cache path, potentially to be migrated.
# This logic was more or less taken from `transformers`, with the following differences:
# - Diffusers doesn't use custom environment variables to specify the cache path.
# - There is no need to migrate the cache format, just move the files to the new location.
hf_cache_home = os.path.expanduser(
    os.getenv("HF_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "huggingface"))
)
old_diffusers_cache = os.path.join(hf_cache_home, "diffusers")
def move_cache(old_cache_dir: Optional[str] = None, new_cache_dir: Optional[str] = None) -> None:
    if new_cache_dir is None:
        new_cache_dir = DIFFUSERS_CACHE
    if old_cache_dir is None:
        old_cache_dir = old_diffusers_cache

    old_cache_dir = Path(old_cache_dir).expanduser()
    new_cache_dir = Path(new_cache_dir).expanduser()
    for old_blob_path in old_cache_dir.glob("**/blobs/*"):
        if old_blob_path.is_file() and not old_blob_path.is_symlink():
            new_blob_path = new_cache_dir / old_blob_path.relative_to(old_cache_dir)
            new_blob_path.parent.mkdir(parents=True, exist_ok=True)
            os.replace(old_blob_path, new_blob_path)
            try:
                os.symlink(new_blob_path, old_blob_path)
            except OSError:
                logger.warning(
                    "Could not create symlink between old cache and new cache. If you use an older version of diffusers again, files will be re-downloaded."
                )
# At this point, old_cache_dir contains symlinks to the new cache (it can still be used).
cache_version_file = os.path.join(DIFFUSERS_CACHE, "version_diffusers_cache.txt")
if not os.path.isfile(cache_version_file):
    cache_version = 0
else:
    with open(cache_version_file) as f:
        try:
            cache_version = int(f.read())
        except ValueError:
            cache_version = 0

if cache_version < 1:
    old_cache_is_not_empty = os.path.isdir(old_diffusers_cache) and len(os.listdir(old_diffusers_cache)) > 0
if old_cache_is_not_empty:
logger.warning(
'The cache for model files in Diffusers v0.14.0 has moved to a new location. Moving your '
'existing cached models. This is a one-time operation, you can interrupt it or run it '
'later by calling `diffusers.utils.hub_utils.move_cache()`.'
)
try:
move_cache()
except Exception as e:
            trace = "\n".join(traceback.format_tb(e.__traceback__))
logger.error(
f"""There was a problem when trying to move your cache:\n\n{trace}\n{e.__class__.__name__}: {e}\n\nPlease """
'file an issue at https://github.com/huggingface/diffusers/issues/new/choose, copy paste this whole '
'message and we will do our best to help.'
)
if cache_version < 1:
try:
os.makedirs(DIFFUSERS_CACHE, exist_ok=True)
with open(cache_version_file, 'w') as f:
f.write('1')
except Exception:
logger.warning(
f"""There was a problem when trying to write in your cache folder ({DIFFUSERS_CACHE}). Please, ensure """
'the directory exists and can be written to.'
)
def _add_variant(weights_name: str, variant: Optional[str] = None) -> str:
    if variant is not None:
        splits = weights_name.split(".")
        splits = splits[:-1] + [variant] + splits[-1:]
        weights_name = ".".join(splits)

    return weights_name
def _get_model_file(
    pretrained_model_name_or_path, *, weights_name, subfolder, cache_dir, force_download, proxies, resume_download, local_files_only, use_auth_token, user_agent, revision, commit_hash=None,
):
    pretrained_model_name_or_path = str(pretrained_model_name_or_path)
    if os.path.isfile(pretrained_model_name_or_path):
        return pretrained_model_name_or_path
    elif os.path.isdir(pretrained_model_name_or_path):
        if os.path.isfile(os.path.join(pretrained_model_name_or_path, weights_name)):
            # Load from a PyTorch checkpoint
            model_file = os.path.join(pretrained_model_name_or_path, weights_name)
            return model_file
        elif subfolder is not None and os.path.isfile(
            os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
        ):
            model_file = os.path.join(pretrained_model_name_or_path, subfolder, weights_name)
            return model_file
        else:
            raise EnvironmentError(
                f"Error no file named {weights_name} found in directory {pretrained_model_name_or_path}."
            )
    else:
        # 1. First check if deprecated way of loading from branches is used
        if (
            revision in DEPRECATED_REVISION_ARGS
            and (weights_name == WEIGHTS_NAME or weights_name == SAFETENSORS_WEIGHTS_NAME)
            and version.parse(version.parse(__version__).base_version) >= version.parse("0.20.0")
        ):
            try:
                model_file = hf_hub_download(
                    pretrained_model_name_or_path, filename=_add_variant(weights_name, revision), cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
                )
                warnings.warn(
                    f"Loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'` is deprecated. Loading instead from `revision='main'` with `variant={revision}`. Loading model variants via `revision='{revision}'` will be removed in diffusers v1. Please use `variant='{revision}'` instead.", FutureWarning,
                )
                return model_file
            except:  # noqa: E722
                warnings.warn(
                    f"You are loading the variant {revision} from {pretrained_model_name_or_path} via `revision='{revision}'`. This behavior is deprecated and will be removed in diffusers v1. One should use `variant='{revision}'` instead. However, it appears that {pretrained_model_name_or_path} currently does not have a {_add_variant(weights_name, revision)} file in the 'main' branch of {pretrained_model_name_or_path}. \n The Diffusers team and community would be very grateful if you could open an issue: https://github.com/huggingface/diffusers/issues/new with the title '{pretrained_model_name_or_path} is missing {_add_variant(weights_name, revision)}' so that the correct variant file can be added.", FutureWarning,
                )
        try:
            # 2. Load model file as usual
            model_file = hf_hub_download(
                pretrained_model_name_or_path, filename=weights_name, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, local_files_only=local_files_only, use_auth_token=use_auth_token, user_agent=user_agent, subfolder=subfolder, revision=revision or commit_hash,
            )
            return model_file
except RepositoryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier '
"""listed on \'https://huggingface.co/models\'\nIf this is a private repository, make sure to pass a """
"""token having permission to this repo with `use_auth_token` or log in with `huggingface-cli """
"""login`.""" )
except RevisionNotFoundError:
raise EnvironmentError(
f'{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for '
"""this model name. Check the model page at """
f'\'https://huggingface.co/{pretrained_model_name_or_path}\' for available revisions.' )
except EntryNotFoundError:
raise EnvironmentError(
f'{pretrained_model_name_or_path} does not appear to have a file named {weights_name}.' )
except HTTPError as err:
raise EnvironmentError(
f'There was a specific connection error when trying to load {pretrained_model_name_or_path}:\n{err}' )
except ValueError:
raise EnvironmentError(
f'We couldn\'t connect to \'{HUGGINGFACE_CO_RESOLVE_ENDPOINT}\' to load this model, couldn\'t find it'
f' in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a'
f' directory containing a file named {weights_name} or'
""" \nCheckout your internet connection or see how to run the library in"""
""" offline mode at \'https://huggingface.co/docs/diffusers/installation#offline-mode\'.""" )
except EnvironmentError:
raise EnvironmentError(
f'Can\'t load the model for \'{pretrained_model_name_or_path}\'. If you were trying to load it from '
"""\'https://huggingface.co/models\', make sure you don\'t have a local directory with the same name. """
f'Otherwise, make sure \'{pretrained_model_name_or_path}\' is the correct path to a directory '
f'containing a file named {weights_name}' )
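

# Illustrative call of the helper above (all argument values are placeholders):
#
#     model_file = _get_model_file(
#         "runwayml/stable-diffusion-v1-5", weights_name=WEIGHTS_NAME, subfolder="unet",
#         cache_dir=None, force_download=False, proxies=None, resume_download=False,
#         local_files_only=False, use_auth_token=None, user_agent=None, revision=None,
#     )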
| 12
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
logger = get_logger(__name__)
class MockDownloadManager:
    dummy_file_name = "dummy_data"
    datasets_scripts_dir = "datasets"
    is_local = False

    def __init__(
        self,
        dataset_name: str,
        config: str,
        version: Union[Version, str],
        cache_dir: Optional[str] = None,
        use_local_dummy_data: bool = False,
        load_existing_dummy_data: bool = True,
        download_callbacks: Optional[List[Callable]] = None,
    ):
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data

        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version)
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file(self):
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file

    @property
    def dummy_data_folder(self):
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy", self.config.name, self.version_name)
        # structure is dummy / version_name
        return os.path.join("dummy", self.version_name)

    @property
    def dummy_zip_file(self):
        return os.path.join(self.dummy_data_folder, "dummy_data.zip")

    def download_dummy_data(self):
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )

        local_path = cached_path(
            path_to_dummy_data_dir, cache_dir=self.cache_dir, extract_compressed_file=True, force_extract=True
        )

        return os.path.join(local_path, self.dummy_file_name)

    @property
    def local_path_to_dummy_data(self):
        return os.path.join(self.datasets_scripts_dir, self.dataset_name, self.dummy_zip_file)

    @property
    def github_path_to_dummy_data(self):
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name, self.dummy_zip_file.replace(os.sep, "/"))
        return self._bucket_url

    @property
    def manual_dir(self):
        # return full path if its a dir
        if os.path.isdir(self.dummy_file):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep, "/").split("/")[:-1])
    def download_and_extract(self, data_url, *args):
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name

        # special case when data_url is a dict
        if isinstance(data_url, dict):
            return self.create_dummy_data_dict(dummy_file, data_url)
        elif isinstance(data_url, (list, tuple)):
            return self.create_dummy_data_list(dummy_file, data_url)
        else:
            return self.create_dummy_data_single(dummy_file, data_url)

    def download(self, data_url, *args):
        return self.download_and_extract(data_url)

    def download_custom(self, data_url, custom_download):
        return self.download_and_extract(data_url)

    def extract(self, path, *args, **kwargs):
        return path

    def get_recorded_sizes_checksums(self):
        return {}
    def create_dummy_data_dict(self, path_to_dummy_data, data_url):
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls, list):
                    for single_url in single_urls:
                        download_callback(single_url)
                else:
                    single_url = single_urls
                    download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls, list):
                value = [os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(x).name)) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(Path(single_url).name))
            dummy_data_dict[key] = value

        # make sure that values are unique
        if all(isinstance(i, str) for i in dummy_data_dict.values()) and len(set(dummy_data_dict.values())) < len(
            dummy_data_dict.values()
        ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}

        return dummy_data_dict
    def create_dummy_data_list(self, path_to_dummy_data, data_url):
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}", url)) for url in data_url)
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed") for url in data_url
        )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url)
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url)
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(single_url.split("/")[-1]))
            dummy_data_list.append(value)
        return dummy_data_list

    def create_dummy_data_single(self, path_to_dummy_data, data_url):
        for download_callback in self.download_callbacks:
            download_callback(data_url)
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data, urllib.parse.quote_plus(data_url.split("/")[-1]))
        if os.path.exists(value) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
def __a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
def __a ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __a ( self : int , _A : Optional[Any] ) -> Dict:
"""simple docstring"""
def _iter_archive_members(_A : Optional[int] ):
# this preserves the order of the members inside the ZIP archive
lowercase : int = Path(self.dummy_file ).parent
lowercase : List[str] = path.relative_to(_A )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_A )
lowercase : Tuple = Path(_A )
lowercase : List[Any] = _iter_archive_members(_A ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(_A ).as_posix(), file_path.open('''rb''' )
def __a ( self : Optional[Any] , _A : Dict ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(_A , _A ):
lowercase : Dict = [paths]
for path in paths:
if os.path.isfile(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(_A ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(_A , _A )
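
# Illustration (not part of the original class): the create_dummy_data_* helpers
# above map each URL to a local dummy filename by percent-encoding the last path
# component with urllib.parse.quote_plus. The sample URL below is hypothetical.
import urllib.parse
from pathlib import Path

sample_url = "https://example.com/data/train.txt?version=2"
print(urllib.parse.quote_plus(Path(sample_url).name))  # train.txt%3Fversion%3D2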
| 308
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
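
# Quick sanity check of the config above; assumes the class is exported as
# `MgpstrConfig` in recent transformers releases, with defaults matching
# alibaba-damo/mgp-str-base.
if __name__ == "__main__":
    config = MgpstrConfig()
    print(config.model_type)      # mgp-str
    print(config.num_bpe_labels)  # 50257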
| 300
|
def bfs(graph, source, sink, parent) -> bool:
    """Breadth-first search for an augmenting path; fills in the parent array."""
    visited = [False] * len(graph)
    queue = [source]
    visited[source] = True

    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u

    return visited[sink]


def ford_fulkerson(graph, source, sink) -> int:
    """Edmonds-Karp style Ford-Fulkerson: augment along shortest paths until none remain."""
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink

        while s != source:
            # Find the minimum value in the selected path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]

        max_flow += path_flow
        v = sink

        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]

    return max_flow


graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]

source, sink = 0, 5
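
# Self-check (illustrative): this classic CLRS flow network has maximum s-t
# flow 23. ford_fulkerson mutates `graph` into its residual form, so the check
# runs on a deep copy before the print call below consumes the original.
import copy

assert ford_fulkerson(copy.deepcopy(graph), source, sink) == 23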
print(ford_fulkerson(graph, source, sink))
| 308
| 0
|
"""simple docstring"""
# using dfs for finding eulerian path traversal
def UpperCamelCase__ ( lowercase__ : int , lowercase__ : Optional[Any] , lowercase__ : Dict , lowercase__ : Any=None ):
snake_case : str = (path or []) + [u]
for v in graph[u]:
if visited_edge[u][v] is False:
snake_case : int = True, True
snake_case : List[Any] = dfs(lowercase__ , lowercase__ , lowercase__ , lowercase__ )
return path
def UpperCamelCase__ ( lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ):
snake_case : List[Any] = 0
snake_case : Union[str, Any] = -1
for i in range(lowercase__ ):
if i not in graph.keys():
continue
if len(graph[i] ) % 2 == 1:
odd_degree_nodes += 1
snake_case : Any = i
if odd_degree_nodes == 0:
return 1, odd_node
if odd_degree_nodes == 2:
return 2, odd_node
return 3, odd_node
def UpperCamelCase__ ( lowercase__ : Any , lowercase__ : Dict ):
snake_case : Optional[int] = [[False for _ in range(max_node + 1 )] for _ in range(max_node + 1 )]
snake_case : Union[str, Any] = check_circuit_or_path(lowercase__ , lowercase__ )
if check == 3:
print("graph is not Eulerian" )
print("no path" )
return
snake_case : Optional[Any] = 1
if check == 2:
snake_case : Optional[int] = odd_node
print("graph has a Euler path" )
if check == 1:
print("graph has a Euler cycle" )
snake_case : int = dfs(lowercase__ , lowercase__ , lowercase__ )
print(lowercase__ )
def UpperCamelCase__ ( ):
snake_case : Tuple = {1: [2, 3, 4], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [4]}
snake_case : Optional[Any] = {1: [2, 3, 4, 5], 2: [1, 3], 3: [1, 2], 4: [1, 5], 5: [1, 4]}
snake_case : List[Any] = {1: [2, 3, 4], 2: [1, 3, 4], 3: [1, 2], 4: [1, 2, 5], 5: [4]}
snake_case : str = {1: [2, 3], 2: [1, 3], 3: [1, 2]}
snake_case : List[Any] = {
1: [],
2: []
# all degree is zero
}
snake_case : Union[str, Any] = 10
check_euler(lowercase__ , lowercase__ )
check_euler(lowercase__ , lowercase__ )
check_euler(lowercase__ , lowercase__ )
check_euler(lowercase__ , lowercase__ )
check_euler(lowercase__ , lowercase__ )
if __name__ == "__main__":
main()
| 148
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.txt'}
lowerCAmelCase_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCAmelCase_ = {
'openbmb/cpm-ant-10b': 10_24,
}
def snake_case( __magic_name__ ) -> int:
'''simple docstring'''
lowercase : Optional[int] = collections.OrderedDict()
with open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as reader:
lowercase : str = reader.readlines()
for index, token in enumerate(__magic_name__ ):
lowercase : Union[str, Any] = token.rstrip('''\n''' )
lowercase : List[Any] = index
return vocab
class _A ( _lowerCamelCase ):
def __init__( self : List[str] , _A : Any , _A : List[str]="<unk>" , _A : Union[str, Any]=200 ) -> List[Any]:
"""simple docstring"""
lowercase : Optional[int] = vocab
lowercase : List[str] = unk_token
lowercase : Any = max_input_chars_per_word
def __a ( self : List[str] , _A : Tuple ) -> str:
"""simple docstring"""
lowercase : Dict = list(_A )
if len(_A ) > self.max_input_chars_per_word:
return [self.unk_token]
lowercase : int = 0
lowercase : Dict = []
while start < len(_A ):
lowercase : Optional[Any] = len(_A )
lowercase : List[str] = None
while start < end:
lowercase : List[Any] = ''''''.join(chars[start:end] )
if substr in self.vocab:
lowercase : Union[str, Any] = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(_A )
lowercase : Dict = end
return sub_tokens
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
_UpperCamelCase : int = False
def __init__( self : List[str] , _A : int , _A : Optional[Any]="<d>" , _A : Any="</d>" , _A : Optional[Any]="<s>" , _A : Any="</s>" , _A : Any="<pad>" , _A : List[Any]="<unk>" , _A : Optional[Any]="</n>" , _A : List[str]="</_>" , _A : Optional[Any]="left" , **_A : str , ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=_A , eod_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , unk_token=_A , line_token=_A , space_token=_A , padding_side=_A , **_A , )
lowercase : str = bod_token
lowercase : str = eod_token
lowercase : Any = load_vocab(_A )
lowercase : List[Any] = self.encoder[space_token]
lowercase : Tuple = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
        lowercase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
lowercase : int = {v: k for k, v in self.encoder.items()}
lowercase : Optional[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __a ( self : List[str] ) -> List[str]:
"""simple docstring"""
return self.encoder["\n"]
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.encoder )
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : str , _A : List[str] ) -> Tuple:
"""simple docstring"""
lowercase : int = []
for x in jieba.cut(_A , cut_all=_A ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_A ) )
return output_tokens
def __a ( self : List[Any] , _A : Tuple , **_A : Optional[int] ) -> Any:
"""simple docstring"""
lowercase : List[str] = [i for i in token_ids if i >= 0]
lowercase : Any = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_A , **_A )
def __a ( self : List[Any] , _A : int ) -> Optional[Any]:
"""simple docstring"""
return token in self.encoder
def __a ( self : Dict , _A : List[str] ) -> str:
"""simple docstring"""
return "".join(_A )
def __a ( self : List[str] , _A : List[str] ) -> Any:
"""simple docstring"""
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def __a ( self : Tuple , _A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.decoder.get(_A , self.unk_token )
def __a ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(_A ):
lowercase : str = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowercase : Optional[int] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
lowercase : Any = 0
if " " in self.encoder:
lowercase : List[Any] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
lowercase : Dict = self.encoder['''\n''']
del self.encoder["\n"]
        lowercase : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(_A , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
lowercase : Any = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def __a ( self : str , _A : List[int] , _A : List[int] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __a ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A ))
return [1] + ([0] * len(_A ))
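
# Hedged usage sketch: in recent transformers releases this tokenizer is exposed
# as CpmAntTokenizer and depends on the `jieba` package; loading the checkpoint
# needs network access, and the exact token ids are illustrative only.
from transformers import CpmAntTokenizer

tokenizer = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b")
ids = tokenizer.encode("今天天气真好!")
print(tokenizer.decode(ids))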
| 308
| 0
|
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class lowerCAmelCase__ :
'''simple docstring'''
pass
| 96
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images) -> None:
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )

    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return

    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service",
                indice_name="laion_400m",
                num_images=num_images,
                aesthetic_weight=0.1,
            )

    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)

    with open(f"{class_data_dir}/caption.txt", "w") as fa, open(f"{class_data_dir}/urls.txt", "w") as fb, open(
        f"{class_data_dir}/images.txt", "w"
    ) as fc:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the payload decodes as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    fa.write(images["caption"] + "\n")
                    fb.write(images["url"] + "\n")
                    fc.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
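
# Example invocation (hypothetical prompt and paths, shown for illustration only);
# requires the `clip-retrieval` package and access to the LAION knn service:
#
#   python retrieve.py --class_prompt "photo of a dog" \
#       --class_data_dir ./real_reg/dog --num_class_images 200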
| 308
| 0
|
"""simple docstring"""
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> tuple[int, int]:
if b == 0:
return (1, 0)
(A__) = extended_euclid(lowercase_ , a % b )
A__ = a // b
return (y, x - k * y)
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
(A__) = extended_euclid(lowercase_ , lowercase_ )
A__ = na * na
A__ = ra * x * na + ra * y * na
return (n % m + m) % m
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ ) -> int:
(A__) = extended_euclid(lowercase_ , lowercase_ )
if b < 0:
A__ = (b % n + n) % n
return b
def _SCREAMING_SNAKE_CASE ( lowercase_ , lowercase_ , lowercase_ , lowercase_ ) -> int:
A__ = invert_modulo(lowercase_ , lowercase_ ), invert_modulo(lowercase_ , lowercase_ )
A__ = na * na
A__ = ra * x * na + ra * y * na
return (n % m + m) % m
if __name__ == "__main__":
from doctest import testmod
testmod(name="chinese_remainder_theorem", verbose=True)
testmod(name="chinese_remainder_theorem2", verbose=True)
testmod(name="invert_modulo", verbose=True)
testmod(name="extended_euclid", verbose=True)
| 247
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launcher's own arguments plus everything destined for the training script."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
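
# Typical invocation (hypothetical script name and arguments, for illustration):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --model_name_or_path bert-base-cased ...
#
# The launcher imports the training script as a module and hands its `_mp_fn`
# entry point to torch_xla's xmp.spawn, once per requested TPU core.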
| 308
| 0
|
"""simple docstring"""
def lowerCamelCase__ ( __snake_case, __snake_case, __snake_case, __snake_case, __snake_case ) -> int:
"""simple docstring"""
if index == number_of_items:
return 0
_UpperCamelCase = 0
_UpperCamelCase = 0
_UpperCamelCase = knapsack(__snake_case, __snake_case, __snake_case, __snake_case, index + 1 )
if weights[index] <= max_weight:
_UpperCamelCase = values[index] + knapsack(
__snake_case, __snake_case, __snake_case, max_weight - weights[index], index + 1 )
return max(__snake_case, __snake_case )
if __name__ == "__main__":
import doctest
doctest.testmod()
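
# Illustrative check: with weights [1, 2, 4, 5], values [5, 4, 8, 6] and a
# capacity of 5, taking the items of weight 1 and 4 yields the best value, 13.
assert knapsack([1, 2, 4, 5], [5, 4, 8, 6], 4, 5, 0) == 13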
| 194
|
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase_ = logging.get_logger(__name__)
def snake_case( __magic_name__ ) -> List[List[ImageInput]]:
'''simple docstring'''
if isinstance(__magic_name__ , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__magic_name__ , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__magic_name__ ):
return [[videos]]
raise ValueError(F"""Could not make batched video from {videos}""" )
class _A ( _lowerCamelCase ):
_UpperCamelCase : str = ['''pixel_values''']
def __init__( self : List[str] , _A : bool = True , _A : Dict[str, int] = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Dict[str, int] = None , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : Optional[int] , ) -> None:
"""simple docstring"""
super().__init__(**_A )
lowercase : List[Any] = size if size is not None else {'''shortest_edge''': 224}
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
lowercase : Dict = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
lowercase : Dict = get_size_dict(_A , param_name='''crop_size''' )
lowercase : List[str] = do_resize
lowercase : Optional[Any] = size
lowercase : List[str] = do_center_crop
lowercase : List[Any] = crop_size
lowercase : str = resample
lowercase : Tuple = do_rescale
lowercase : Any = rescale_factor
lowercase : Tuple = do_normalize
lowercase : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
lowercase : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Dict[str, int] , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Tuple = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" in size:
lowercase : Dict = get_resize_output_image_size(_A , size['''shortest_edge'''] , default_to_square=_A )
elif "height" in size and "width" in size:
lowercase : Union[str, Any] = (size['''height'''], size['''width'''])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def __a ( self : Dict , _A : np.ndarray , _A : Dict[str, int] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Any , ) -> np.ndarray:
"""simple docstring"""
lowercase : Optional[Any] = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(_A , size=(size['''height'''], size['''width''']) , data_format=_A , **_A )
def __a ( self : Union[str, Any] , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> Union[str, Any]:
"""simple docstring"""
return rescale(_A , scale=_A , data_format=_A , **_A )
def __a ( self : str , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Union[str, Any] , ) -> np.ndarray:
"""simple docstring"""
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __a ( self : int , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[ChannelDimension] = ChannelDimension.FIRST , ) -> np.ndarray:
"""simple docstring"""
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
lowercase : Union[str, Any] = to_numpy_array(_A )
if do_resize:
lowercase : List[Any] = self.resize(image=_A , size=_A , resample=_A )
if do_center_crop:
lowercase : Optional[int] = self.center_crop(_A , size=_A )
if do_rescale:
lowercase : Tuple = self.rescale(image=_A , scale=_A )
if do_normalize:
lowercase : Union[str, Any] = self.normalize(image=_A , mean=_A , std=_A )
lowercase : Any = to_channel_dimension_format(_A , _A )
return image
def __a ( self : List[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : PILImageResampling = None , _A : bool = None , _A : Dict[str, int] = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : Union[str, Any] , ) -> PIL.Image.Image:
"""simple docstring"""
lowercase : str = do_resize if do_resize is not None else self.do_resize
lowercase : Optional[Any] = resample if resample is not None else self.resample
lowercase : List[str] = do_center_crop if do_center_crop is not None else self.do_center_crop
lowercase : str = do_rescale if do_rescale is not None else self.do_rescale
lowercase : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowercase : List[str] = do_normalize if do_normalize is not None else self.do_normalize
lowercase : Optional[int] = image_mean if image_mean is not None else self.image_mean
lowercase : Optional[Any] = image_std if image_std is not None else self.image_std
lowercase : str = size if size is not None else self.size
lowercase : Any = get_size_dict(_A , default_to_square=_A )
lowercase : Optional[int] = crop_size if crop_size is not None else self.crop_size
lowercase : str = get_size_dict(_A , param_name='''crop_size''' )
if not valid_images(_A ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
lowercase : Union[str, Any] = make_batched(_A )
lowercase : Dict = [
[
self._preprocess_image(
image=_A , do_resize=_A , size=_A , resample=_A , do_center_crop=_A , crop_size=_A , do_rescale=_A , rescale_factor=_A , do_normalize=_A , image_mean=_A , image_std=_A , data_format=_A , )
for img in video
]
for video in videos
]
lowercase : Tuple = {'''pixel_values''': videos}
return BatchFeature(data=_A , tensor_type=_A )
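
# Hedged usage sketch: the class above follows the VideoMAEImageProcessor API in
# transformers; the random frames below stand in for a real 16-frame video clip.
import numpy as np
from transformers import VideoMAEImageProcessor

processor = VideoMAEImageProcessor()
video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(16)]
inputs = processor([video], return_tensors="np")
print(inputs["pixel_values"].shape)  # expected: (1, 16, 3, 224, 224)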
| 308
| 0
|
from __future__ import annotations


def comp_and_swap(array: list[int], index1: int, index2: int, direction: int) -> None:
    """Compare array[index1] with array[index2] and swap when they violate
    `direction` (1 = ascending, 0 = descending)."""
    if (direction == 1 and array[index1] > array[index2]) or (
        direction == 0 and array[index1] < array[index2]
    ):
        array[index1], array[index2] = array[index2], array[index1]


def bitonic_merge(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        for i in range(low, low + middle):
            comp_and_swap(array, i, i + middle, direction)
        bitonic_merge(array, low, middle, direction)
        bitonic_merge(array, low + middle, middle, direction)


def bitonic_sort(array: list[int], low: int, length: int, direction: int) -> None:
    if length > 1:
        middle = int(length / 2)
        bitonic_sort(array, low, middle, 1)
        bitonic_sort(array, low + middle, middle, 0)
        bitonic_merge(array, low, length, direction)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item.strip()) for item in user_input.split(",")]

    bitonic_sort(unsorted, 0, len(unsorted), 1)
    print("\nSorted array in ascending order is: ", end="")
    print(*unsorted, sep=", ")

    bitonic_merge(unsorted, 0, len(unsorted), 0)
    print("Sorted array in descending order is: ", end="")
    print(*unsorted, sep=", ")
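
# Note: bitonic sort requires the input length to be a power of two, since every
# stage splits its range exactly in half. A minimal illustrative run:
example = [12, 42, -21, 17]
bitonic_sort(example, 0, len(example), 1)
assert example == [-21, 12, 17, 42]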
| 1
|
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
assert mmeta["long_pair"] == "heb-eng"
| 308
| 0
|
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
_lowercase: List[Any] = logging.get_logger(__name__)
@dataclass
class _lowercase ( _lowerCamelCase ):
"""simple docstring"""
__A = [
'''no_inference''',
'''no_cuda''',
'''no_tpu''',
'''no_speed''',
'''no_memory''',
'''no_env_print''',
'''no_multi_process''',
]
def __init__(self , **lowerCamelCase_ ):
"""simple docstring"""
for deprecated_arg in self.deprecated_args:
if deprecated_arg in kwargs:
a = deprecated_arg[3:]
setattr(self , _A , not kwargs.pop(_A ) )
logger.warning(
F'''{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'''
F''' {positive_arg}={kwargs[positive_arg]}''' )
a = kwargs.pop("torchscript" , self.torchscript )
a = kwargs.pop("torch_xla_tpu_print_metrics" , self.torch_xla_tpu_print_metrics )
a = kwargs.pop("fp16_opt_level" , self.fpaa_opt_level )
super().__init__(**_A )
__A = field(default=_lowerCamelCase, metadata={"help": "Trace the models using torchscript"} )
__A = field(default=_lowerCamelCase, metadata={"help": "Print Xla/PyTorch tpu metrics"} )
__A = field(
default="O1", metadata={
"help": (
"For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. "
"See details at https://nvidia.github.io/apex/amp.html"
)
}, )
@cached_property
def UpperCamelCase_ (self ):
"""simple docstring"""
requires_backends(self , ["torch"] )
logger.info("PyTorch: setting up devices" )
if not self.cuda:
a = torch.device("cpu" )
a = 0
elif is_torch_tpu_available():
a = xm.xla_device()
a = 0
else:
a = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
a = torch.cuda.device_count()
return device, n_gpu
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return is_torch_tpu_available() and self.tpu
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
requires_backends(self , ["torch"] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
requires_backends(self , ["torch"] )
return self._setup_devices[0]
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
requires_backends(self , ["torch"] )
return self._setup_devices[1]
@property
def UpperCamelCase_ (self ):
"""simple docstring"""
return self.n_gpu > 0
| 227
|
from __future__ import annotations

from typing import Any


def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """Branch on leaving out, then taking, sequence[index]; print every complete subsequence."""
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
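
# The state-space tree visits all 2**n subsequences (the empty one included), so
# [3, 1, 2, 4] prints 16 lists. A collecting variant, shown for illustration:
def collect_all_subsequences(sequence: list[Any]) -> list[list[Any]]:
    result: list[list[Any]] = []

    def backtrack(index: int, current: list[Any]) -> None:
        if index == len(sequence):
            result.append(list(current))
            return
        backtrack(index + 1, current)
        current.append(sequence[index])
        backtrack(index + 1, current)
        current.pop()

    backtrack(0, [])
    return result


assert len(collect_all_subsequences([3, 1, 2, 4])) == 2**4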
| 308
| 0
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price with tax applied, e.g. price_plus_tax(100, 0.25) == 125.0."""
    return price * (1 + tax_rate)


if __name__ == "__main__":
    print(f"{price_plus_tax(100, 0.25) = }")
    print(f"{price_plus_tax(125.50, 0.05) = }")
| 122
|
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCAmelCase_ = logging.get_logger(__name__)
class _A ( _lowerCamelCase ):
_UpperCamelCase : Dict = ['''input_features''']
def __init__( self : int , _A : int=80 , _A : Union[str, Any]=16_000 , _A : Union[str, Any]=160 , _A : Any=30 , _A : str=400 , _A : Union[str, Any]=0.0 , _A : Tuple=False , **_A : List[str] , ) -> int:
"""simple docstring"""
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , return_attention_mask=_A , **_A , )
lowercase : Optional[Any] = n_fft
lowercase : Optional[int] = hop_length
lowercase : Optional[int] = chunk_length
lowercase : Union[str, Any] = chunk_length * sampling_rate
lowercase : Optional[Any] = self.n_samples // hop_length
lowercase : Optional[Any] = sampling_rate
lowercase : Union[str, Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=8_000.0 , sampling_rate=_A , norm='''slaney''' , mel_scale='''slaney''' , )
def __a ( self : Dict , _A : np.array ) -> np.ndarray:
"""simple docstring"""
lowercase : List[str] = spectrogram(
_A , window_function(self.n_fft , '''hann''' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters , log_mel='''log10''' , )
lowercase : Union[str, Any] = log_spec[:, :-1]
lowercase : Optional[Any] = np.maximum(_A , log_spec.max() - 8.0 )
lowercase : str = (log_spec + 4.0) / 4.0
return log_spec
@staticmethod
# Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
def __a ( _A : List[np.ndarray] , _A : List[np.ndarray] , _A : float = 0.0 ) -> List[np.ndarray]:
"""simple docstring"""
if attention_mask is not None:
lowercase : Optional[Any] = np.array(_A , np.intaa )
lowercase : List[str] = []
for vector, length in zip(_A , attention_mask.sum(-1 ) ):
lowercase : Optional[int] = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1E-7 )
if length < normed_slice.shape[0]:
lowercase : int = padding_value
normed_input_values.append(_A )
else:
lowercase : Dict = [(x - x.mean()) / np.sqrt(x.var() + 1E-7 ) for x in input_values]
return normed_input_values
def __call__( self : Union[str, Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : bool = True , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = None , _A : Optional[str] = "max_length" , _A : Optional[int] = None , _A : Optional[int] = None , _A : Optional[bool] = None , **_A : int , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
'''It is strongly recommended to pass the `sampling_rate` argument to this function. '''
'''Failing to do so can result in silent errors that might be hard to debug.''' )
lowercase : Union[str, Any] = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
lowercase : Optional[Any] = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowercase : List[str] = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
lowercase : List[Any] = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowercase : Optional[int] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowercase : List[str] = [np.asarray([raw_speech] ).T]
lowercase : Tuple = BatchFeature({'''input_features''': raw_speech} )
# convert into correct format for padding
lowercase : str = self.pad(
_A , padding=_A , max_length=max_length if max_length else self.n_samples , truncation=_A , pad_to_multiple_of=_A , return_attention_mask=return_attention_mask or do_normalize , )
# zero-mean and unit-variance normalization
if do_normalize:
lowercase : Tuple = self.zero_mean_unit_var_norm(
padded_inputs['''input_features'''] , attention_mask=padded_inputs['''attention_mask'''] , padding_value=self.padding_value , )
lowercase : str = np.stack(padded_inputs['''input_features'''] , axis=0 )
# make sure list is in array format
lowercase : List[str] = padded_inputs.get('''input_features''' ).transpose(2 , 0 , 1 )
lowercase : str = [self._np_extract_fbank_features(_A ) for waveform in input_features[0]]
if isinstance(input_features[0] , _A ):
lowercase : int = [np.asarray(_A , dtype=np.floataa ) for feature in input_features]
else:
lowercase : Optional[int] = input_features
if return_attention_mask:
# rescale from sample (48000) to feature (3000)
lowercase : List[str] = padded_inputs['''attention_mask'''][:, :: self.hop_length]
if return_tensors is not None:
lowercase : Any = padded_inputs.convert_to_tensors(_A )
return padded_inputs
def __a ( self : Optional[Any] ) -> Dict[str, Any]:
"""simple docstring"""
lowercase : Optional[Any] = copy.deepcopy(self.__dict__ )
lowercase : Dict = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
return output
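
# Hedged usage sketch: the class above mirrors WhisperFeatureExtractor in
# transformers. One second of silence is padded to 30 s and mapped onto a
# (batch, n_mels, frames) log-mel array, (1, 80, 3000) with the defaults.
import numpy as np
from transformers import WhisperFeatureExtractor

feature_extractor = WhisperFeatureExtractor()
audio = np.zeros(16_000, dtype=np.float32)  # 1 s at 16 kHz
features = feature_extractor(audio, sampling_rate=16_000, return_tensors="np")
print(features["input_features"].shape)  # (1, 80, 3000)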
| 308
| 0
|
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__a = logging.get_logger(__name__) # pylint: disable=invalid-name
__a = 2_5_6
class lowercase__( _lowerCamelCase ):
"""simple docstring"""
a :str = ['''melgan''']
def __init__( self : List[str] , SCREAMING_SNAKE_CASE_ : SpectrogramNotesEncoder , SCREAMING_SNAKE_CASE_ : SpectrogramContEncoder , SCREAMING_SNAKE_CASE_ : TaFilmDecoder , SCREAMING_SNAKE_CASE_ : DDPMScheduler , SCREAMING_SNAKE_CASE_ : OnnxRuntimeModel if is_onnx_available() else Any , ) -> None:
super().__init__()
# From MELGAN
lowercase_ = math.log(1e-5 ) # Matches MelGAN training.
lowercase_ = 4.0 # Largest value for most examples
lowercase_ = 1_2_8
self.register_modules(
notes_encoder=_A , continuous_encoder=_A , decoder=_A , scheduler=_A , melgan=_A , )
def _lowercase ( self : str , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Union[str, Any]=(-1.0, 1.0) , SCREAMING_SNAKE_CASE_ : str=False ) -> str:
lowercase_ = output_range
if clip:
lowercase_ = torch.clip(_A , self.min_value , self.max_value )
# Scale to [0, 1].
lowercase_ = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _lowercase ( self : List[str] , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Optional[int]=(-1.0, 1.0) , SCREAMING_SNAKE_CASE_ : Tuple=False ) -> Optional[Any]:
lowercase_ = input_range
lowercase_ = torch.clip(_A , _A , _A ) if clip else outputs
# Scale to [0, 1].
lowercase_ = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
def _lowercase ( self : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any ) -> int:
lowercase_ = input_tokens > 0
lowercase_ = self.notes_encoder(
encoder_input_tokens=_A , encoder_inputs_mask=_A )
lowercase_ = self.continuous_encoder(
encoder_inputs=_A , encoder_inputs_mask=_A )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _lowercase ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Dict ) -> int:
lowercase_ = noise_time
if not torch.is_tensor(_A ):
lowercase_ = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(_A ) and len(timesteps.shape ) == 0:
lowercase_ = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
lowercase_ = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
lowercase_ = self.decoder(
encodings_and_masks=_A , decoder_input_tokens=_A , decoder_noise_time=_A )
return logits
@torch.no_grad()
def __call__( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[List[int]] , SCREAMING_SNAKE_CASE_ : Optional[torch.Generator] = None , SCREAMING_SNAKE_CASE_ : int = 1_0_0 , SCREAMING_SNAKE_CASE_ : bool = True , SCREAMING_SNAKE_CASE_ : str = "numpy" , SCREAMING_SNAKE_CASE_ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , SCREAMING_SNAKE_CASE_ : int = 1 , ) -> Union[AudioPipelineOutput, Tuple]:
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_A , _A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(_A )}.''' )
lowercase_ = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
lowercase_ = np.zeros([1, 0, self.n_dims] , np.floataa )
lowercase_ = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=_A , device=self.device )
for i, encoder_input_tokens in enumerate(_A ):
if i == 0:
lowercase_ = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
lowercase_ = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=_A , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
lowercase_ = ones
lowercase_ = self.scale_features(
_A , output_range=[-1.0, 1.0] , clip=_A )
lowercase_ = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=_A , continuous_mask=_A , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
lowercase_ = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=_A , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(_A )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
lowercase_ = self.decode(
encodings_and_masks=_A , input_tokens=_A , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
lowercase_ = self.scheduler.step(_A , _A , _A , generator=_A ).prev_sample
lowercase_ = self.scale_to_features(_A , input_range=[-1.0, 1.0] )
lowercase_ = mel[:1]
lowercase_ = mel.cpu().float().numpy()
lowercase_ = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_A , _A )
logger.info('''Generated segment''' , _A )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
'''Cannot return output in \'np\' format if ONNX is not available. Make sure to have ONNX installed or set \'output_type\' to \'mel\'.''' )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
'''Cannot return output in \'np\' format if melgan component is not defined. Make sure to define `self.melgan` or set \'output_type\' to \'mel\'.''' )
if output_type == "numpy":
lowercase_ = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
lowercase_ = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=_A )
| 30
|
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class _A :
def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any:
"""simple docstring"""
lowercase : str = parent
lowercase : Optional[Any] = batch_size
lowercase : Union[str, Any] = seq_length
lowercase : str = is_training
lowercase : str = use_input_lengths
lowercase : List[Any] = use_token_type_ids
lowercase : Union[str, Any] = use_labels
lowercase : Tuple = gelu_activation
lowercase : Dict = sinusoidal_embeddings
lowercase : Any = causal
lowercase : str = asm
lowercase : Optional[Any] = n_langs
lowercase : Dict = vocab_size
lowercase : Dict = n_special
lowercase : List[Any] = hidden_size
lowercase : str = num_hidden_layers
lowercase : int = num_attention_heads
lowercase : str = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : List[Any] = max_position_embeddings
lowercase : Optional[int] = type_sequence_label_size
lowercase : List[str] = initializer_range
lowercase : List[str] = num_labels
lowercase : int = num_choices
lowercase : int = summary_type
lowercase : Tuple = use_proj
lowercase : Union[str, Any] = scope
lowercase : List[str] = bos_token_id
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str = None
if self.use_input_lengths:
lowercase : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase : Union[str, Any] = None
if self.use_token_type_ids:
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase : Union[str, Any] = None
lowercase : List[str] = None
lowercase : Optional[Any] = None
if self.use_labels:
lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float()
lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = XLMModel(config=_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , lengths=_A , langs=_A )
lowercase : Dict = model(_A , langs=_A )
lowercase : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel(_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = XLMForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Any = model(_A , start_positions=_A , end_positions=_A )
lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = XLMForQuestionAnswering(_A )
model.to(_A )
model.eval()
lowercase : Any = model(_A )
lowercase : Tuple = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
lowercase : Optional[int] = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((lowercase) , ) : Optional[int] = result_with_labels.to_tuple()
lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A )
((lowercase) , ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int:
"""simple docstring"""
lowercase : List[str] = XLMForSequenceClassification(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Union[str, Any] = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = self.num_labels
lowercase : Tuple = XLMForTokenClassification(_A )
model.to(_A )
model.eval()
lowercase : str = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : int = self.num_choices
lowercase : List[Any] = XLMForMultipleChoice(config=_A )
model.to(_A )
model.eval()
lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Union[str, Any] = config_and_inputs
lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class _A ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
_UpperCamelCase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_UpperCamelCase : Tuple = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowercase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
lowercase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def __a ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = XLMModelTester(self )
lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 )
def __a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_A )
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_A )
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_A )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_A )
def __a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_A )
def __a ( self : Dict ) -> int:
"""simple docstring"""
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_A )
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )
    def _check_attentions_for_generate(
        self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        """simple docstring"""
        self.assertIsInstance(attentions , tuple )
        self.assertListEqual(
            [isinstance(iter_attentions , tuple ) for iter_attentions in attentions] , [True] * len(attentions ) )
        self.assertEqual(len(attentions ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_attentions in enumerate(attentions ):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(iter_attentions ) )
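        # Without a KV cache each decoding step re-attends over the full
        # prefix, so both tgt_len and src_len above grow by one per new token.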
    def _check_hidden_states_for_generate(
        self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1
    ):
        """simple docstring"""
        self.assertIsInstance(hidden_states , tuple )
        self.assertListEqual(
            [isinstance(iter_hidden_states , tuple ) for iter_hidden_states in hidden_states] , [True] * len(hidden_states ) , )
        self.assertEqual(len(hidden_states ) , (max_length - min_length) * num_beam_groups )
        for idx, iter_hidden_states in enumerate(hidden_states ):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(iter_hidden_states ) , )
@slow
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Any = XLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class _A ( unittest.TestCase ):
@slow
def __a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(_A )
lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president
lowercase : List[str] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowercase : Dict = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : Any = StableDiffusionDiffEditPipeline
__lowerCAmelCase : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'''height''', '''width''', '''image'''} | {'''image_latents'''}
__lowerCAmelCase : List[Any] = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {'''image'''} | {'''image_latents'''}
__lowerCAmelCase : Optional[Any] = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__lowerCAmelCase : str = frozenset([] )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
UpperCAmelCase : Optional[int] = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
UpperCAmelCase : str = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_A , set_alpha_to_one=_A , )
UpperCAmelCase : str = DDIMInverseScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=_A , set_alpha_to_zero=_A , )
torch.manual_seed(0 )
UpperCAmelCase : Optional[Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="""gelu""" , projection_dim=512 , )
UpperCAmelCase : Optional[int] = CLIPTextModel(_A )
UpperCAmelCase : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase : Tuple = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
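    # Note: sample_size=32 for the tiny UNet and the two-block VAE above mean
    # 32x32 test images map to 16x16 latents, which the dummy inputs below assume.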
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = floats_tensor((1, 16, 16) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase : Optional[int] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith("""mps""" ):
UpperCAmelCase : Tuple = torch.manual_seed(_A )
else:
UpperCAmelCase : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase : Optional[Any] = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase : Optional[Any] = Image.fromarray(np.uint8(_A ) ).convert("""RGB""" )
if str(_A ).startswith("""mps""" ):
UpperCAmelCase : Tuple = torch.manual_seed(_A )
else:
UpperCAmelCase : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase : Any = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        UpperCAmelCase : Optional[int] = Image.fromarray(np.uint8(_A ) ).convert("""RGB""" )
if str(_A ).startswith("""mps""" ):
UpperCAmelCase : str = torch.manual_seed(_A )
else:
UpperCAmelCase : int = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase : Dict = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
if not hasattr(self.pipeline_class , """_optional_components""" ):
return
UpperCAmelCase : Optional[Any] = self.get_dummy_components()
UpperCAmelCase : List[str] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase : Optional[int] = self.get_dummy_inputs(_A )
UpperCAmelCase : Tuple = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase : List[Any] = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"`{optional_component}` did not stay set to None after loading." , )
UpperCAmelCase : Dict = self.get_dummy_inputs(_A )
UpperCAmelCase : Optional[Any] = pipe_loaded(**_A )[0]
UpperCAmelCase : List[Any] = np.abs(output - output_loaded ).max()
self.assertLess(_A , 1E-4 )
def SCREAMING_SNAKE_CASE ( self ) -> int:
'''simple docstring'''
UpperCAmelCase : int = '''cpu'''
UpperCAmelCase : Optional[int] = self.get_dummy_components()
UpperCAmelCase : Union[str, Any] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase : Any = self.get_dummy_mask_inputs(_A )
UpperCAmelCase : Dict = pipe.generate_mask(**_A )
UpperCAmelCase : str = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
UpperCAmelCase : Any = np.array([0] * 9 )
UpperCAmelCase : int = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
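    # generate_mask returns a binary map over the 16x16 latent grid; for these
    # dummy components the sampled 3x3 corner is expected to be all zeros.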
def SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
UpperCAmelCase : Dict = '''cpu'''
UpperCAmelCase : Optional[Any] = self.get_dummy_components()
UpperCAmelCase : str = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase : Optional[int] = self.get_dummy_inversion_inputs(_A )
UpperCAmelCase : int = pipe.invert(**_A ).images
UpperCAmelCase : Optional[int] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase : Any = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799] , )
UpperCAmelCase : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1E-3 )
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def SCREAMING_SNAKE_CASE ( self ) -> Dict:
'''simple docstring'''
UpperCAmelCase : int = '''cpu'''
UpperCAmelCase : Union[str, Any] = self.get_dummy_components()
        UpperCAmelCase : Optional[int] = {'''beta_start''': 0.00085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
UpperCAmelCase : List[Any] = DPMSolverMultistepScheduler(**_A )
UpperCAmelCase : Union[str, Any] = DPMSolverMultistepInverseScheduler(**_A )
UpperCAmelCase : int = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase : List[str] = self.get_dummy_inversion_inputs(_A )
UpperCAmelCase : str = pipe.invert(**_A ).images
UpperCAmelCase : Any = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase : List[Any] = np.array(
            [0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.5105, 0.5015, 0.4407, 0.4799] , )
UpperCAmelCase : int = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1E-3 )
@require_torch_gpu
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE ( self ) -> List[str]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def SCREAMING_SNAKE_CASE ( cls ) -> Any:
'''simple docstring'''
UpperCAmelCase : List[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png""" )
UpperCAmelCase : Union[str, Any] = raw_image.convert("""RGB""" ).resize((768, 768) )
UpperCAmelCase : int = raw_image
def SCREAMING_SNAKE_CASE ( self ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : str = torch.manual_seed(0 )
UpperCAmelCase : Tuple = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=_A , torch_dtype=torch.floataa )
UpperCAmelCase : int = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase : Tuple = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase : Optional[int] = '''a bowl of fruit'''
UpperCAmelCase : List[str] = '''a bowl of pears'''
UpperCAmelCase : Tuple = pipe.generate_mask(
image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
UpperCAmelCase : Optional[Any] = pipe.invert(
prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A ).latents
UpperCAmelCase : Union[str, Any] = pipe(
prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , output_type="""numpy""" , ).images[0]
UpperCAmelCase : Tuple = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def SCREAMING_SNAKE_CASE ( self ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Dict = torch.manual_seed(0 )
UpperCAmelCase : List[str] = StableDiffusionDiffEditPipeline.from_pretrained(
"""stabilityai/stable-diffusion-2-1""" , safety_checker=_A , torch_dtype=torch.floataa )
UpperCAmelCase : List[str] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase : Optional[int] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase : Dict = '''a bowl of fruit'''
UpperCAmelCase : List[Any] = '''a bowl of pears'''
UpperCAmelCase : Optional[int] = pipe.generate_mask(
image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
UpperCAmelCase : Union[str, Any] = pipe.invert(
prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A , num_inference_steps=25 , ).latents
UpperCAmelCase : Any = pipe(
prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , num_inference_steps=25 , output_type="""numpy""" , ).images[0]
UpperCAmelCase : str = (
np.array(
load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/diffedit/pears.png""" ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def solution(length: int = 50) -> int:
    '''simple docstring'''
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1 ):
        for tile_length in range(2 , 5 ):
            for tile_start in range(row_length - tile_length + 1 ):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
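# A minimal cross-check (assuming the intended problem is tiling a 1 x n row
# with unit squares and tiles of length 2, 3 and 4, i.e. Project Euler 117):
# the count satisfies f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with f(0) = 1.
def _reference_count(length: int) -> int:
    counts = [1, 1, 2, 4]  # f(0)..f(3)
    for _ in range(4 , length + 1 ):
        counts.append(counts[-1] + counts[-2] + counts[-3] + counts[-4] )
    return counts[length]
assert all(solution(n ) == _reference_count(n ) for n in range(12 ) )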
if __name__ == "__main__":
print(f'''{solution() = }''')
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class lowerCamelCase__( unittest.TestCase):
@slow
def lowerCAmelCase__ ( self: Optional[Any] ):
        model = TFAutoModelForSeqaSeqLM.from_pretrained("""google/mt5-small""" )
        tokenizer = AutoTokenizer.from_pretrained("""google/mt5-small""" )
        input_ids = tokenizer("""Hello there""" , return_tensors="""tf""" ).input_ids
        labels = tokenizer("""Hi I am""" , return_tensors="""tf""" ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
import os
def solution(filename: str = "input.txt") -> int:
    '''simple docstring'''
    with open(os.path.join(os.path.dirname(__file__ ) , filename ) ) as input_file:
        matrix = [
            [int(element ) for element in line.split(''',''' )]
            for line in input_file.readlines()
        ]
    rows = len(matrix )
    cols = len(matrix[0] )
    minimal_path_sums = [[-1 for _ in range(cols )] for _ in range(rows )]
    for i in range(rows ):
        minimal_path_sums[i][0] = matrix[i][0]
    for j in range(1 , cols ):
        for i in range(rows ):
            minimal_path_sums[i][j] = minimal_path_sums[i][j - 1] + matrix[i][j]
        for i in range(1 , rows ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
        for i in range(rows - 2 , -1 , -1 ):
            minimal_path_sums[i][j] = min(
                minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
    return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
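# The column-by-column dynamic programme above relaxes each column three
# times (left-to-right, then top-down, then bottom-up), so a path may enter
# a cell from the left, from above or from below: the minimal path sum with
# moves right, up and down (Project Euler problem 82).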
if __name__ == "__main__":
print(f'''{solution() = }''')
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
# projection layer + position embeddings
rename_keys.extend(
[
("module.cls_token", "vit.embeddings.cls_token"),
("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
("module.pos_embed", "vit.embeddings.position_embeddings"),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("module.norm.weight", "layernorm.weight"),
("module.norm.bias", "layernorm.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight" )
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias" )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
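# Note: the fused qkv projection from the original MSN checkpoint is split
# row-wise into three hidden_size-sized chunks and stored as separate
# query, key and value weights (in that order) under the HF ViT key names.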
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k , None )
def remove_projection_head(state_dict):
    ignore_keys = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct, old, new):
    val = dct.pop(old )
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id , filename ) , "r" ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
if "s16" in checkpoint_url:
A_ : int = 384
A_ : Optional[Any] = 1536
A_ : Tuple = 6
elif "l16" in checkpoint_url:
A_ : Union[str, Any] = 1024
A_ : List[str] = 4096
A_ : int = 24
A_ : Union[str, Any] = 16
A_ : Tuple = 0.1
elif "b4" in checkpoint_url:
A_ : Union[str, Any] = 4
elif "l7" in checkpoint_url:
A_ : Dict = 7
A_ : List[Any] = 1024
A_ : str = 4096
A_ : int = 24
A_ : Dict = 16
A_ : Tuple = 0.1
    model = ViTMSNModel(config )
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size )
    remove_projection_head(state_dict )
    rename_keys = create_rename_keys(config , base_model=True )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , base_model=True )
    model.load_state_dict(state_dict )
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url , stream=True ).raw )
    image_processor = ViTImageProcessor(
        size=config.image_size , image_mean=IMAGENET_DEFAULT_MEAN , image_std=IMAGENET_DEFAULT_STD )
    inputs = image_processor(images=image , return_tensors="pt" )
    # forward pass
    torch.manual_seed(2 )
    outputs = model(**inputs )
    last_hidden_state = outputs.last_hidden_state
# The following Colab Notebook was used to generate these outputs:
# https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
if "s16" in checkpoint_url:
A_ : List[str] = torch.tensor([[-1.09_15, -1.48_76, -1.18_09]] )
elif "b16" in checkpoint_url:
A_ : Any = torch.tensor([[14.2889, -18.9045, 11.7281]] )
elif "l16" in checkpoint_url:
A_ : Dict = torch.tensor([[41.5028, -22.8681, 45.6475]] )
elif "b4" in checkpoint_url:
A_ : Tuple = torch.tensor([[-4.38_68, 5.29_32, -0.41_37]] )
else:
A_ : Optional[int] = torch.tensor([[-0.17_92, -0.64_65, 2.42_63]] )
# verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3] , expected_slice , atol=1e-4 )
    print(f"Saving model to {pytorch_dump_folder_path}" )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f"Saving image processor to {pytorch_dump_folder_path}" )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
_lowerCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar''',
type=str,
help='''URL of the checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
from transformers import AutoTokenizer, TFAutoModelForSeqaSeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class _A ( unittest.TestCase ):
@slow
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
        model = TFAutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' )
        tokenizer = AutoTokenizer.from_pretrained('''google/mt5-small''' )
        input_ids = tokenizer('''Hello there''' , return_tensors='''tf''' ).input_ids
        labels = tokenizer('''Hi I am''' , return_tensors='''tf''' ).input_ids
        loss = model(input_ids , labels=labels ).loss
        mtf_score = -tf.math.reduce_mean(loss ).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 2E-4 )
"""simple docstring"""
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
__A = "\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n"
class lowerCamelCase__ ( unittest.TestCase , _lowerCamelCase ):
    def setUp(self ):
        """simple docstring"""
        self.tool = load_tool("text-question-answering" )
        self.tool.setup()
        self.remote_tool = load_tool("text-question-answering" , remote=True )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : str = self.tool(_A , "What did Hugging Face do in April 2021?" )
self.assertEqual(_A , "launched the BigScience Research Workshop" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : List[str] = self.remote_tool(_A , "What did Hugging Face do in April 2021?" )
self.assertEqual(_A , "launched the BigScience Research Workshop" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Tuple = self.tool(text=_A , question="What did Hugging Face do in April 2021?" )
self.assertEqual(_A , "launched the BigScience Research Workshop" )
def lowerCamelCase_ ( self ):
"""simple docstring"""
snake_case : Any = self.remote_tool(text=_A , question="What did Hugging Face do in April 2021?" )
self.assertEqual(_A , "launched the BigScience Research Workshop" )
from heapq import heappop, heappush
import numpy as np
def dijkstra(grid, source, destination, allow_diagonal):
    '''simple docstring'''
    rows, cols = grid.shape
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    if allow_diagonal:
        dx += [-1, -1, 1, 1]
        dy += [-1, 1, -1, 1]
    queue, visited = [(0, source)], set()
    matrix = np.full((rows, cols) , np.inf )
    matrix[source] = 0
    predecessors = np.empty((rows, cols) , dtype=object )
    predecessors[source] = None
    while queue:
        (dist, (x, y)) = heappop(queue )
        if (x, y) in visited:
            continue
        visited.add((x, y) )
        if (x, y) == destination:
            path = []
            while (x, y) != source:
                path.append((x, y) )
                x, y = predecessors[x, y]
            path.append(source )  # add the source manually
            path.reverse()
            return matrix[destination], path
        for i in range(len(dx ) ):
            nx, ny = x + dx[i], y + dy[i]
            if 0 <= nx < rows and 0 <= ny < cols:
                next_node = grid[nx][ny]
                if next_node == 1 and matrix[nx, ny] > dist + 1:
                    heappush(queue , (dist + 1, (nx, ny)) )
                    matrix[nx, ny] = dist + 1
                    predecessors[nx, ny] = (x, y)
    return np.inf, []
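# Minimal usage sketch (assumes cells equal to 1 are traversable, since only
# such neighbours are relaxed above):
#   grid = np.array([[1, 1, 1], [0, 0, 1], [1, 1, 1]])
#   distance, path = dijkstra(grid, (0, 0), (2, 0), allow_diagonal=False)
#   distance == 6.0, and path lists the cells from source to destination.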
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / """utils"""))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowercase__ = get_tests_dir("""fixtures""")
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
def A_ ( self ):
_lowerCamelCase : Union[str, Any] = mock.Mock()
_lowerCamelCase : Dict = 500
_lowerCamelCase : List[str] = {}
_lowerCamelCase : int = HTTPError
_lowerCamelCase : Union[str, Any] = {}
# Download this model to make sure it's in the cache.
_lowerCamelCase : Tuple = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request' , return_value=_A ) as mock_head:
_lowerCamelCase : str = WavaVecaFeatureExtractor.from_pretrained('hf-internal-testing/tiny-random-wav2vec2' )
# This check we did call the fake head request
mock_head.assert_called()
def A_ ( self ):
_lowerCamelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json' )
@is_staging_test
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def A_ ( cls ):
_lowerCamelCase : Optional[Any] = TOKEN
HfFolder.save_token(_A )
@classmethod
def A_ ( cls ):
try:
delete_repo(token=cls._token , repo_id='test-feature-extractor' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='valid_org/test-feature-extractor-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id='test-dynamic-feature-extractor' )
except HTTPError:
pass
def A_ ( self ):
_lowerCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(_A )
feature_extractor.push_to_hub('test-feature-extractor' , use_auth_token=self._token )
_lowerCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_A , getattr(_A , _A ) )
# Reset repo
delete_repo(token=self._token , repo_id='test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_A , repo_id='test-feature-extractor' , push_to_hub=_A , use_auth_token=self._token )
_lowerCamelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(F'''{USER}/test-feature-extractor''' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_A , getattr(_A , _A ) )
def A_ ( self ):
_lowerCamelCase : str = WavaVecaFeatureExtractor.from_pretrained(_A )
feature_extractor.push_to_hub('valid_org/test-feature-extractor' , use_auth_token=self._token )
_lowerCamelCase : List[Any] = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_A , getattr(_A , _A ) )
# Reset repo
delete_repo(token=self._token , repo_id='valid_org/test-feature-extractor' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
_A , repo_id='valid_org/test-feature-extractor-org' , push_to_hub=_A , use_auth_token=self._token )
_lowerCamelCase : List[str] = WavaVecaFeatureExtractor.from_pretrained('valid_org/test-feature-extractor-org' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(_A , getattr(_A , _A ) )
def A_ ( self ):
CustomFeatureExtractor.register_for_auto_class()
_lowerCamelCase : Any = CustomFeatureExtractor.from_pretrained(_A )
feature_extractor.push_to_hub('test-dynamic-feature-extractor' , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor'} , )
_lowerCamelCase : Any = AutoFeatureExtractor.from_pretrained(
F'''{USER}/test-dynamic-feature-extractor''' , trust_remote_code=_A )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , 'CustomFeatureExtractor' )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
'configuration_mask2former': [
'MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'Mask2FormerConfig',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['image_processing_mask2former'] = ['Mask2FormerImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mask2former'] = [
'MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'Mask2FormerForUniversalSegmentation',
'Mask2FormerModel',
'Mask2FormerPreTrainedModel',
]
if TYPE_CHECKING:
    from .configuration_mask2former import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, Mask2FormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .image_processing_mask2former import Mask2FormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_mask2former import (
            MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            Mask2FormerForUniversalSegmentation,
            Mask2FormerModel,
            Mask2FormerPreTrainedModel,
        )
else:
import sys
lowerCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase ) ):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol )
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase )
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"""Decryption using Key #{key}: {translated}""" )
def main() -> None:
    message = input("Encrypted message: " )
    message = message.upper()
    decrypt(message )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
def sum_of_digits(n: int) -> int:
    '''simple docstring'''
    n = abs(n )
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    '''simple docstring'''
    n = abs(n )
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10 )
def sum_of_digits_compact(n: int) -> int:
    '''simple docstring'''
    return sum(int(c ) for c in str(abs(n ) ) )
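# Quick equivalence check for the three implementations above (a small
# sanity sketch; all three must agree on any integer input):
assert all(
    sum_of_digits(n ) == sum_of_digits_recursion(n ) == sum_of_digits_compact(n )
    for n in (0, 7, -7, 12_345, 10**18 + 1)
)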
def benchmark() -> None:
    '''simple docstring'''
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable , value: int ) -> None:
        call = F"""{func.__name__}({value})"""
        timing = timeit(F"""__main__.{call}""" , setup='''import __main__''' )
        print(F"""{call:56} = {func(value )} -- {timing:.4f} seconds""" )
    for value in (26_21_44, 11_25_89_99_06_84_26_24, 1_26_76_50_60_02_28_22_94_01_49_67_03_20_53_76):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func , value )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class _UpperCAmelCase( _lowerCamelCase ):
def __init__( self , __a , __a , __a) -> int:
'''simple docstring'''
_UpperCamelCase = dataset
_UpperCamelCase = process
_UpperCamelCase = params
def __len__( self) -> int:
'''simple docstring'''
return len(self.dataset)
def __getitem__( self , __a) -> Any:
'''simple docstring'''
_UpperCamelCase = self.dataset[i]
_UpperCamelCase = self.process(_A , **self.params)
return processed
class _UpperCAmelCase( _lowerCamelCase ):
def __init__( self , __a , __a , __a , __a=None) -> Optional[Any]:
'''simple docstring'''
_UpperCamelCase = loader
_UpperCamelCase = infer
_UpperCamelCase = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
_UpperCamelCase = None
_UpperCamelCase = loader_batch_size
# Internal bookkeeping
_UpperCamelCase = None
_UpperCamelCase = None
def __len__( self) -> Optional[int]:
'''simple docstring'''
return len(self.loader)
def __iter__( self) -> int:
'''simple docstring'''
_UpperCamelCase = iter(self.loader)
return self
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
if isinstance(self._loader_batch_data , torch.Tensor):
# Batch data is simple tensor, just fetch the slice
_UpperCamelCase = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
_UpperCamelCase = {}
for k, element in self._loader_batch_data.items():
if isinstance(_A , _A):
# Convert ModelOutput to tuple first
_UpperCamelCase = element.to_tuple()
if isinstance(element[0] , torch.Tensor):
_UpperCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
_UpperCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(_A , _A):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor):
_UpperCamelCase = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
elif isinstance(element[0] , np.ndarray):
_UpperCamelCase = tuple(np.expand_dims(el[self._loader_batch_index] , 0) for el in element)
continue
if element is None:
# This can happen for optional data that get passed around
_UpperCamelCase = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_UpperCamelCase = element[self._loader_batch_index].unsqueeze(0)
elif isinstance(element[self._loader_batch_index] , np.ndarray):
# Take correct batch data, but make it looked like batch_size=1
# For compatibility with other methods within transformers
_UpperCamelCase = np.expand_dims(element[self._loader_batch_index] , 0)
else:
# This is typically a list, so no need to `unsqueeze`.
_UpperCamelCase = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
_UpperCamelCase = self._loader_batch_data.__class__(_A)
self._loader_batch_index += 1
return result
def UpperCAmelCase ( self) -> Union[str, Any]:
'''simple docstring'''
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
_UpperCamelCase = next(self.iterator)
_UpperCamelCase = self.infer(_A , **self.params)
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(_A , torch.Tensor):
_UpperCamelCase = processed
else:
_UpperCamelCase = list(processed.keys())[0]
_UpperCamelCase = processed[key]
if isinstance(_A , _A):
_UpperCamelCase = len(_A)
else:
_UpperCamelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCamelCase = observed_batch_size
# Setting internal index to unwrap the batch
_UpperCamelCase = processed
_UpperCamelCase = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
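    # When `loader_batch_size` is set, a whole DataLoader batch is inferred
    # once and then unrolled item by item via `loader_batch_item`, so callers
    # always observe outputs with batch_size == 1.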
class _UpperCAmelCase( _lowerCamelCase ):
def __init__( self , __a , __a , __a , __a=None) -> Union[str, Any]:
'''simple docstring'''
super().__init__(_A , _A , _A)
def __iter__( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = iter(self.loader)
_UpperCamelCase = None
return self
def UpperCAmelCase ( self) -> List[str]:
'''simple docstring'''
if self.subiterator is None:
_UpperCamelCase = self.infer(next(self.iterator) , **self.params)
try:
# Try to return next item
_UpperCamelCase = next(self.subiterator)
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
# ChunkIterator will keep feeding until ALL elements of iterator
# all have created their subiterator and have been iterating against.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
_UpperCamelCase = self.infer(next(self.iterator) , **self.params)
_UpperCamelCase = next(self.subiterator)
return processed
class _UpperCAmelCase( _lowerCamelCase ):
def __iter__( self) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = iter(self.loader)
return self
def UpperCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCamelCase = False
_UpperCamelCase = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
_UpperCamelCase = self.loader_batch_item()
_UpperCamelCase = item.pop('''is_last''')
accumulator.append(_A)
if is_last:
return accumulator
while not is_last:
_UpperCamelCase = self.infer(next(self.iterator) , **self.params)
if self.loader_batch_size is not None:
if isinstance(_A , torch.Tensor):
_UpperCamelCase = processed
else:
_UpperCamelCase = list(processed.keys())[0]
_UpperCamelCase = processed[key]
if isinstance(_A , _A):
_UpperCamelCase = len(_A)
else:
_UpperCamelCase = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
_UpperCamelCase = observed_batch_size
_UpperCamelCase = processed
_UpperCamelCase = 0
while self._loader_batch_index < self.loader_batch_size:
_UpperCamelCase = self.loader_batch_item()
_UpperCamelCase = item.pop('''is_last''')
accumulator.append(_A)
if is_last:
return accumulator
else:
_UpperCamelCase = processed
_UpperCamelCase = item.pop('''is_last''')
accumulator.append(_A)
return accumulator
class _UpperCAmelCase( _lowerCamelCase ):
def __init__( self , __a , __a) -> List[Any]:
'''simple docstring'''
_UpperCamelCase = dataset
_UpperCamelCase = key
def __len__( self) -> Any:
'''simple docstring'''
return len(self.dataset)
def __getitem__( self , __a) -> List[str]:
'''simple docstring'''
return self.dataset[i][self.key]
class _UpperCAmelCase( _lowerCamelCase ):
def __init__( self , __a , __a , __a) -> Dict:
'''simple docstring'''
_UpperCamelCase = dataset
_UpperCamelCase = keya
_UpperCamelCase = keya
def __len__( self) -> Any:
'''simple docstring'''
return len(self.dataset)
def __getitem__( self , __a) -> Optional[int]:
'''simple docstring'''
return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keya]}
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main() -> None:
    '''simple docstring'''
    parser = ArgumentParser('''Accelerate CLI tool''' , usage='''accelerate <command> [<args>]''' , allow_abbrev=False )
    subparsers = parser.add_subparsers(help='''accelerate command helpers''' )
    # Register commands
    get_config_parser(subparsers=subparsers )
    env_command_parser(subparsers=subparsers )
    launch_command_parser(subparsers=subparsers )
    tpu_command_parser(subparsers=subparsers )
    test_command_parser(subparsers=subparsers )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , '''func''' ):
        parser.print_help()
        exit(1 )
    # Run
    args.func(args )
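# Each sub-command parser registers its handler on the returned parser
# (typically via `set_defaults(func=...)`), so dispatch reduces to the
# single `args.func(args)` call above.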
if __name__ == "__main__":
main()
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE_: List[str] =logging.get_logger(__name__)
if is_vision_available():
import PIL
class __A ( _lowerCamelCase ):
a__ : Tuple = ['''pixel_values''']
def __init__(self : List[Any] , __a : bool = True , __a : Dict[str, int] = None , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : bool = True , __a : Dict[str, int] = None , __a : bool = True , __a : Union[int, float] = 1 / 255 , __a : bool = True , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = True , **__a : Optional[Any] , ):
super().__init__(**_A )
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 224}
UpperCAmelCase_ = get_size_dict(_A , default_to_square=_A )
UpperCAmelCase_ = crop_size if crop_size is not None else {'''height''': 224, '''width''': 224}
UpperCAmelCase_ = get_size_dict(_A , default_to_square=_A , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase_ = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase_ = do_convert_rgb
def _lowercase (self : Optional[int] , __a : np.ndarray , __a : Dict[str, int] , __a : PILImageResampling = PILImageResampling.BICUBIC , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Optional[Any] , ):
UpperCAmelCase_ = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
UpperCAmelCase_ = get_resize_output_image_size(_A , size=size["shortest_edge"] , default_to_square=_A )
return resize(_A , size=_A , resample=_A , data_format=_A , **_A )
def _lowercase (self : Union[str, Any] , __a : np.ndarray , __a : Dict[str, int] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Dict , ):
UpperCAmelCase_ = get_size_dict(_A )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(_A , size=(size["height"], size["width"]) , data_format=_A , **_A )
def _lowercase (self : Tuple , __a : np.ndarray , __a : Union[int, float] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ):
return rescale(_A , scale=_A , data_format=_A , **_A )
def _lowercase (self : Tuple , __a : np.ndarray , __a : Union[float, List[float]] , __a : Union[float, List[float]] , __a : Optional[Union[str, ChannelDimension]] = None , **__a : Union[str, Any] , ):
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def _lowercase (self : str , __a : ImageInput , __a : bool = None , __a : Dict[str, int] = None , __a : PILImageResampling = None , __a : bool = None , __a : int = None , __a : bool = None , __a : float = None , __a : bool = None , __a : Optional[Union[float, List[float]]] = None , __a : Optional[Union[float, List[float]]] = None , __a : bool = None , __a : Optional[Union[str, TensorType]] = None , __a : Optional[ChannelDimension] = ChannelDimension.FIRST , **__a : Optional[int] , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(_A , param_name="size" , default_to_square=_A )
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(_A , param_name="crop_size" , default_to_square=_A )
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_ = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_ = [convert_to_rgb(_A ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(_A ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=_A , size=_A , resample=_A ) for image in images]
if do_center_crop:
UpperCAmelCase_ = [self.center_crop(image=_A , size=_A ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(_A , _A ) for image in images]
UpperCAmelCase_ = {'''pixel_values''': images}
return BatchFeature(data=_A , tensor_type=_A )
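    # Processing order in `preprocess` above: optional RGB conversion, resize
    # to the shortest edge, center crop, rescale (by `rescale_factor`),
    # mean/std normalisation, then channel-first arrays inside a BatchFeature.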
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    '''simple docstring'''
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F"""module.blocks.{i}.norm1.weight""", F"""vit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm1.bias""", F"""vit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""module.blocks.{i}.attn.proj.weight""", F"""vit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.attn.proj.bias""", F"""vit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.norm2.weight""", F"""vit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""module.blocks.{i}.norm2.bias""", F"""vit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.weight""", F"""vit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc1.bias""", F"""vit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.weight""", F"""vit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""module.blocks.{i}.mlp.fc2.bias""", F"""vit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
('''module.cls_token''', '''vit.embeddings.cls_token'''),
('''module.patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''module.patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''module.pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''module.norm.weight''', '''layernorm.weight'''),
('''module.norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
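# Example of the split, assuming hidden_size=384 (ViT-S/16): the fused qkv weight
# has shape (3 * 384, 384) = (1152, 384); rows [0:384] become the query projection,
# rows [384:768] the key projection, and rows [768:1152] the value projection.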
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    ignore_keys = [
'''module.fc.fc1.weight''',
'''module.fc.fc1.bias''',
'''module.fc.bn1.weight''',
'''module.fc.bn1.bias''',
'''module.fc.bn1.running_mean''',
'''module.fc.bn1.running_var''',
'''module.fc.bn1.num_batches_tracked''',
'''module.fc.fc2.weight''',
'''module.fc.fc2.bias''',
'''module.fc.bn2.weight''',
'''module.fc.bn2.bias''',
'''module.fc.bn2.running_mean''',
'''module.fc.bn2.running_var''',
'''module.fc.bn2.num_batches_tracked''',
'''module.fc.fc3.weight''',
'''module.fc.fc3.bias''',
]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMSNConfig()
    config.num_labels = 1000
    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    if "s16" in checkpoint_url:
        config.hidden_size = 384
        config.intermediate_size = 1536
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    model = ViTMSNModel(config)
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]
    image_processor = ViTImageProcessor(size=config.image_size)
    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)
    model.load_state_dict(state_dict)
    model.eval()
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")
    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state
    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0915, -1.4876, -1.1809]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[14.2889, -18.9045, 11.7281]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[41.5028, -22.8681, 45.6475]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3868, 5.2932, -0.4137]])
    else:
        expected_slice = torch.tensor([[-0.1792, -0.6465, 2.4263]])
    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)
    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar',
type=str,
help='URL of the checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
lowerCAmelCase_ = parser.parse_args()
convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
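# Example run (script filename assumed; the URL matches the --checkpoint_url default above):
#   python convert_vit_msn_to_pytorch.py \
#       --checkpoint_url https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar \
#       --pytorch_dump_folder_path ./vit-msn-small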
| 308
| 0
|
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Extra kwargs are passed straight to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
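# Example invocation via python-fire, assuming this file is saved as rouge_cli.py
# (positional args map to pred_path and tgt_path, flags to the keyword args):
#   python rouge_cli.py predictions.txt references.txt --save_path=rouge.json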
| 227
|
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return the price including tax."""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(f'''{price_plus_tax(1_00, 0.2_5) = }''')
print(f'''{price_plus_tax(1_2_5.5_0, 0.0_5) = }''')
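# Expected output of the demo above (the f-string `=` specifier echoes the expression text):
#   price_plus_tax(1_00, 0.2_5) = 125.0
#   price_plus_tax(1_2_5.5_0, 0.0_5) = 131.775  (up to floating-point rounding in the last digits)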
| 308
| 0
|
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be run first on your machine."
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
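# Typical usage once `accelerate` is installed (the CLI wires the `config`
# subcommand to config_command_parser/config_command above):
#   accelerate config                               # interactive prompts
#   accelerate config --config_file my_config.yaml  # write to a custom path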
| 122
|
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1_024, num_of_sequences=1_024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences
    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)["content"])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)["input_ids"]
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {"streaming": True}
    train_data = load_dataset(args.dataset_name, split="train", **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, train_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float("inf")
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
    format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info("Evaluating and saving model after training")
eval_loss, perplexity = evaluate(args)
logger.info(f"loss/eval: {eval_loss}, perplexity: {perplexity}")
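# Note: evaluate() reports perplexity = exp(mean cross-entropy loss) over all
# gathered batches; the OverflowError guard maps a diverged loss to inf.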
| 308
| 0
|
def valid_connection(graph, next_ver, curr_ind, path):
    # 1. Validate that current and next vertices are connected
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)
def util_hamilton_cycle(graph, path, curr_ind):
    # Base Case: every vertex has been placed
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False
def hamilton_cycle(graph, start_index=0):
    # Initialize path with -1, meaning "not visited yet"; the extra slot closes the cycle
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with the starting index
    path[0] = path[-1] = start_index
    # evaluate: if a cycle is found return the path, otherwise return an empty list
    return path if util_hamilton_cycle(graph, path, 1) else []
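# Worked example: on the complete graph K4 every vertex order is a cycle, and the
# backtracking search returns the first one found in ascending vertex order:
#   >>> hamilton_cycle([[0, 1, 1, 1], [1, 0, 1, 1], [1, 1, 0, 1], [1, 1, 1, 0]])
#   [0, 1, 2, 3, 0]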
| 30
|
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def snake_case( ) -> Optional[Any]:
'''simple docstring'''
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def snake_case( ) -> int:
'''simple docstring'''
lowercase : List[str] = '''mock-s3-bucket'''
lowercase : Optional[int] = F"""s3://{mock_bucket}"""
lowercase : List[Any] = extract_path_from_uri(__magic_name__ )
assert dataset_path.startswith('''s3://''' ) is False
lowercase : Optional[int] = '''./local/path'''
lowercase : Dict = extract_path_from_uri(__magic_name__ )
assert dataset_path == new_dataset_path
def snake_case( __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : Tuple = is_remote_filesystem(__magic_name__ )
assert is_remote is True
lowercase : int = fsspec.filesystem('''file''' )
lowercase : Optional[Any] = is_remote_filesystem(__magic_name__ )
assert is_remote is False
@pytest.mark.parametrize('''compression_fs_class''' , __magic_name__ )
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[int]:
'''simple docstring'''
lowercase : Optional[Any] = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_file, '''bz2''': bza_file, '''lz4''': lza_file}
lowercase : List[Any] = input_paths[compression_fs_class.protocol]
if input_path is None:
lowercase : Dict = F"""for '{compression_fs_class.protocol}' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(__magic_name__ )
lowercase : Any = fsspec.filesystem(compression_fs_class.protocol , fo=__magic_name__ )
assert isinstance(__magic_name__ , __magic_name__ )
lowercase : List[Any] = os.path.basename(__magic_name__ )
lowercase : Tuple = expected_filename[: expected_filename.rindex('''.''' )]
assert fs.glob('''*''' ) == [expected_filename]
with fs.open(__magic_name__ , '''r''' , encoding='''utf-8''' ) as f, open(__magic_name__ , encoding='''utf-8''' ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize('''protocol''' , ['''zip''', '''gzip'''] )
def test_fs_isfile(protocol, zip_jsonl_path, jsonl_gz_path):
    compressed_file_paths = {"zip": zip_jsonl_path, "gzip": jsonl_gz_path}
    compressed_file_path = compressed_file_paths[protocol]
    member_file_path = "dataset.jsonl"
    path = f"{protocol}://{member_file_path}::{compressed_file_path}"
    fs, *_ = fsspec.get_fs_token_paths(path)
    assert fs.isfile(member_file_path)
    assert not fs.isfile("non_existing_" + member_file_path)
@pytest.mark.integration
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ) -> Dict:
'''simple docstring'''
lowercase : Optional[Any] = hf_api.dataset_info(__magic_name__ , token=__magic_name__ )
lowercase : int = HfFileSystem(repo_info=__magic_name__ , token=__magic_name__ )
assert sorted(hffs.glob('''*''' ) ) == [".gitattributes", "data"]
assert hffs.isdir('''data''' )
assert hffs.isfile('''.gitattributes''' ) and hffs.isfile('''data/text_data.txt''' )
with open(__magic_name__ ) as f:
assert hffs.open('''data/text_data.txt''' , '''r''' ).read() == f.read()
def snake_case( ) -> List[Any]:
'''simple docstring'''
lowercase : List[Any] = '''bz2'''
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(__magic_name__ , __magic_name__ , clobber=__magic_name__ )
with pytest.warns(__magic_name__ ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(__magic_name__ ) == 1
assert (
str(warning_info[0].message )
== F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
| 308
| 0
|
"""simple docstring"""
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}
def topology_sort(graph, vert, visited):
    """Depth-first search from `vert`, returning vertices ordered by finish time."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order
def find_components(reversed_graph, vert, visited):
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component
def strongly_connected_components(graph):
    """Kosaraju's algorithm: DFS finish order, then DFS over the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph = {vert: [] for vert in range(len(graph))}
    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)
    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)
    components_list = []
    visited = len(graph) * [False]
    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)
    return components_list
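# Example: for test_graph_1 the reversed graph is {0: [1], 1: [2], 2: [0], 3: [0], 4: [3]},
# and strongly_connected_components(test_graph_1) returns [[0, 1, 2], [3], [4]]:
# vertices 0 -> 2 -> 1 -> 0 form the only non-trivial cycle.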
| 109
|
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
lowerCAmelCase_ = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"
def __init__( self : str , *_A : int , **_A : str ) -> Union[str, Any]:
"""simple docstring"""
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        forward_params = generate_kwargs
        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces
        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]
        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with the given input length."""
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f"`args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()
        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"
def __call__( self : List[Any] , *_A : List[str] , **_A : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
return super().__call__(*_A , **_A )
    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"""Your min_length={min_length} must be smaller than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
'''a summarization task, where outputs shorter than the input are typically wanted, you might '''
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
'''increasing your max_length manually, e.g. translator(\'...\', max_length=400)''' )
return True
def __a ( self : Optional[Any] , *_A : Optional[Any] , _A : Optional[int]=TruncationStrategy.DO_NOT_TRUNCATE , _A : List[Any]=None , _A : Any=None ) -> Dict:
"""simple docstring"""
if getattr(self.tokenizer , '''_build_translation_inputs''' , _A ):
return self.tokenizer._build_translation_inputs(
*_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A )
else:
return super()._parse_and_tokenize(*_A , truncation=_A )
    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params
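    # Task-name parsing example: for task "translation_en_to_fr" the split yields
    # ["translation", "en", "to", "fr"], so src_lang="en" and tgt_lang="fr".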
def __call__( self : Tuple , *_A : Union[str, Any] , **_A : List[Any] ) -> List[Any]:
"""simple docstring"""
return super().__call__(*_A , **_A )
| 308
| 0
|
import argparse
import logging
import pickle
import random
import time
import numpy as np
from transformers import BertTokenizer, GPTaTokenizer, RobertaTokenizer
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
UpperCAmelCase_ = logging.getLogger(__name__)
def lowerCamelCase__ ( ):
'''simple docstring'''
__lowerCamelCase = argparse.ArgumentParser(
description="""Preprocess the data to avoid re-doing it several times by (tokenization + token_to_ids).""" )
parser.add_argument("""--file_path""" , type=A__ , default="""data/dump.txt""" , help="""The path to the data.""" )
parser.add_argument("""--tokenizer_type""" , type=A__ , default="""bert""" , choices=["""bert""", """roberta""", """gpt2"""] )
parser.add_argument("""--tokenizer_name""" , type=A__ , default="""bert-base-uncased""" , help="""The tokenizer to use.""" )
parser.add_argument("""--dump_file""" , type=A__ , default="""data/dump""" , help="""The dump file prefix.""" )
__lowerCamelCase = parser.parse_args()
logger.info(f'Loading Tokenizer ({args.tokenizer_name})' )
if args.tokenizer_type == "bert":
__lowerCamelCase = BertTokenizer.from_pretrained(args.tokenizer_name )
__lowerCamelCase = tokenizer.special_tokens_map['''cls_token'''] # `[CLS]`
__lowerCamelCase = tokenizer.special_tokens_map['''sep_token'''] # `[SEP]`
elif args.tokenizer_type == "roberta":
__lowerCamelCase = RobertaTokenizer.from_pretrained(args.tokenizer_name )
__lowerCamelCase = tokenizer.special_tokens_map['''cls_token'''] # `<s>`
__lowerCamelCase = tokenizer.special_tokens_map['''sep_token'''] # `</s>`
elif args.tokenizer_type == "gpt2":
__lowerCamelCase = GPTaTokenizer.from_pretrained(args.tokenizer_name )
__lowerCamelCase = tokenizer.special_tokens_map['''bos_token'''] # `<|endoftext|>`
__lowerCamelCase = tokenizer.special_tokens_map['''eos_token'''] # `<|endoftext|>`
logger.info(f'Loading text from {args.file_path}' )
with open(args.file_path , """r""" , encoding="""utf8""" ) as fp:
__lowerCamelCase = fp.readlines()
logger.info("""Start encoding""" )
logger.info(f'{len(A__ )} examples to process.' )
__lowerCamelCase = []
__lowerCamelCase = 0
__lowerCamelCase = 10000
__lowerCamelCase = time.time()
for text in data:
__lowerCamelCase = f'{bos} {text.strip()} {sep}'
__lowerCamelCase = tokenizer.encode(A__ , add_special_tokens=A__ )
rslt.append(A__ )
iter += 1
if iter % interval == 0:
__lowerCamelCase = time.time()
logger.info(f'{iter} examples processed. - {(end-start):.2f}s/{interval}expl' )
__lowerCamelCase = time.time()
logger.info("""Finished binarization""" )
logger.info(f'{len(A__ )} examples processed.' )
__lowerCamelCase = f'{args.dump_file}.{args.tokenizer_name}.pickle'
__lowerCamelCase = tokenizer.vocab_size
if vocab_size < (1 << 16):
__lowerCamelCase = [np.uintaa(A__ ) for d in rslt]
else:
__lowerCamelCase = [np.intaa(A__ ) for d in rslt]
random.shuffle(rslt_ )
logger.info(f'Dump to {dp_file}' )
with open(A__ , """wb""" ) as handle:
pickle.dump(rslt_ , A__ , protocol=pickle.HIGHEST_PROTOCOL )
if __name__ == "__main__":
main()
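# Example run, assuming this script is saved as binarized_data.py (flags match the
# argparse setup above):
#   python binarized_data.py --file_path data/dump.txt --tokenizer_type bert \
#       --tokenizer_name bert-base-uncased --dump_file data/binarized_text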
| 12
|
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCAmelCase_ = get_logger(__name__)
class _A :
_UpperCamelCase : int = '''dummy_data'''
_UpperCamelCase : Tuple = '''datasets'''
_UpperCamelCase : Optional[int] = False
def __init__( self : Any , _A : str , _A : str , _A : Union[Version, str] , _A : Optional[str] = None , _A : bool = False , _A : bool = True , _A : Optional[List[Callable]] = None , ) -> Dict:
"""simple docstring"""
lowercase : Tuple = 0
lowercase : List[Any] = dataset_name
lowercase : int = cache_dir
lowercase : str = use_local_dummy_data
lowercase : Union[str, Any] = config
# download_callbacks take a single url as input
lowercase : List[Callable] = download_callbacks or []
# if False, it doesn't load existing files and it returns the paths of the dummy files relative
# to the dummy_data zip file root
lowercase : Any = load_existing_dummy_data
# TODO(PVP, QL) might need to make this more general
lowercase : Union[str, Any] = str(_A )
# to be downloaded
lowercase : Tuple = None
lowercase : Optional[int] = None
@property
def __a ( self : str ) -> Dict:
"""simple docstring"""
if self._dummy_file is None:
lowercase : Optional[Any] = self.download_dummy_data()
return self._dummy_file
@property
def __a ( self : int ) -> Optional[Any]:
"""simple docstring"""
if self.config is not None:
# structure is dummy / config_name / version_name
return os.path.join('''dummy''' , self.config.name , self.version_name )
# structure is dummy / version_name
return os.path.join('''dummy''' , self.version_name )
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return os.path.join(self.dummy_data_folder , '''dummy_data.zip''' )
def __a ( self : str ) -> int:
"""simple docstring"""
lowercase : str = (
self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
)
lowercase : List[str] = cached_path(
_A , cache_dir=self.cache_dir , extract_compressed_file=_A , force_extract=_A )
return os.path.join(_A , self.dummy_file_name )
@property
def __a ( self : str ) -> Tuple:
"""simple docstring"""
return os.path.join(self.datasets_scripts_dir , self.dataset_name , self.dummy_zip_file )
@property
def __a ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
if self._bucket_url is None:
lowercase : Optional[Any] = hf_github_url(self.dataset_name , self.dummy_zip_file.replace(os.sep , '''/''' ) )
return self._bucket_url
@property
def __a ( self : Tuple ) -> List[str]:
"""simple docstring"""
if os.path.isdir(self.dummy_file ):
return self.dummy_file
# else cut off path to file -> example `xsum`.
return "/".join(self.dummy_file.replace(os.sep , '''/''' ).split('''/''' )[:-1] )
def __a ( self : Union[str, Any] , _A : Dict , *_A : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
if self.load_existing_dummy_data:
# dummy data is downloaded and tested
lowercase : Union[str, Any] = self.dummy_file
else:
# dummy data cannot be downloaded and only the path to dummy file is returned
lowercase : Optional[Any] = self.dummy_file_name
# special case when data_url is a dict
if isinstance(_A , _A ):
return self.create_dummy_data_dict(_A , _A )
elif isinstance(_A , (list, tuple) ):
return self.create_dummy_data_list(_A , _A )
else:
return self.create_dummy_data_single(_A , _A )
def __a ( self : str , _A : Union[str, Any] , *_A : Dict ) -> Dict:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : str , _A : List[str] , _A : Any ) -> Union[str, Any]:
"""simple docstring"""
return self.download_and_extract(_A )
def __a ( self : Optional[int] , _A : Tuple , *_A : str , **_A : Any ) -> Optional[Any]:
"""simple docstring"""
return path
def __a ( self : List[str] ) -> str:
"""simple docstring"""
return {}
def __a ( self : List[str] , _A : Union[str, Any] , _A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
lowercase : Any = {}
for key, single_urls in data_url.items():
for download_callback in self.download_callbacks:
if isinstance(_A , _A ):
for single_url in single_urls:
download_callback(_A )
else:
lowercase : List[str] = single_urls
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
if isinstance(_A , _A ):
lowercase : int = [os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) ) for x in single_urls]
else:
lowercase : int = single_urls
lowercase : Any = os.path.join(_A , urllib.parse.quote_plus(Path(_A ).name ) )
lowercase : str = value
# make sure that values are unique
if all(isinstance(_A , _A ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
dummy_data_dict.values() ):
# append key to value to make its name unique
lowercase : str = {key: value + key for key, value in dummy_data_dict.items()}
return dummy_data_dict
def __a ( self : Optional[int] , _A : List[Any] , _A : Tuple ) -> Tuple:
"""simple docstring"""
lowercase : Optional[Any] = []
# trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
lowercase : Union[str, Any] = all(bool(re.findall('''[0-9]{3,}-of-[0-9]{3,}''' , _A ) ) for url in data_url )
lowercase : str = all(
url.startswith('''https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed''' ) for url in data_url )
if data_url and (is_tf_records or is_pubmed_records):
lowercase : List[str] = [data_url[0]] * len(_A )
for single_url in data_url:
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Optional[int] = os.path.join(_A , urllib.parse.quote_plus(single_url.split('''/''' )[-1] ) )
dummy_data_list.append(_A )
return dummy_data_list
def __a ( self : Optional[Any] , _A : List[str] , _A : Union[str, Any] ) -> List[str]:
"""simple docstring"""
for download_callback in self.download_callbacks:
download_callback(_A )
# we force the name of each key to be the last file / folder name of the url path
# if the url has arguments, we need to encode them with urllib.parse.quote_plus
lowercase : Dict = os.path.join(_A , urllib.parse.quote_plus(data_url.split('''/''' )[-1] ) )
if os.path.exists(_A ) or not self.load_existing_dummy_data:
return value
else:
# Backward compatibility, maybe deprecate at one point.
# For many datasets with single url calls to dl_manager.download_and_extract,
# the dummy_data.zip file is actually the zipped downloaded file
# while now we expected the dummy_data.zip file to be a directory containing
# the downloaded file.
return path_to_dummy_data
def __a ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
def __a ( self : Any ) -> Dict:
"""simple docstring"""
pass
def __a ( self : int , _A : Optional[Any] ) -> Dict:
"""simple docstring"""
def _iter_archive_members(_A : Optional[int] ):
# this preserves the order of the members inside the ZIP archive
lowercase : int = Path(self.dummy_file ).parent
lowercase : List[str] = path.relative_to(_A )
with ZipFile(self.local_path_to_dummy_data ) as zip_file:
lowercase : Optional[int] = zip_file.namelist()
for member in members:
if member.startswith(relative_path.as_posix() ):
yield dummy_parent_path.joinpath(_A )
lowercase : Tuple = Path(_A )
lowercase : List[Any] = _iter_archive_members(_A ) if self.use_local_dummy_data else path.rglob('''*''' )
for file_path in file_paths:
if file_path.is_file() and not file_path.name.startswith(('''.''', '''__''') ):
yield file_path.relative_to(_A ).as_posix(), file_path.open('''rb''' )
def __a ( self : Optional[Any] , _A : Dict ) -> Union[str, Any]:
"""simple docstring"""
if not isinstance(_A , _A ):
lowercase : Dict = [paths]
for path in paths:
if os.path.isfile(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
return
yield path
else:
for dirpath, dirnames, filenames in os.walk(_A ):
if os.path.basename(_A ).startswith(('''.''', '''__''') ):
continue
dirnames.sort()
for filename in sorted(_A ):
if filename.startswith(('''.''', '''__''') ):
continue
yield os.path.join(_A , _A )
| 308
| 0
|
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowerCAmelCase : str = logging.get_logger(__name__)
_lowerCAmelCase : List[Any] = {
'''google/bigbird-roberta-base''': '''https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json''',
'''google/bigbird-roberta-large''': '''https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json''',
'''google/bigbird-base-trivia-itc''': '''https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json''',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __magic_name__ ( _lowerCamelCase ):
"""simple docstring"""
__UpperCamelCase = '''big_bird'''
def __init__( self :Any , snake_case :Optional[int]=50_358 , snake_case :Union[str, Any]=768 , snake_case :List[str]=12 , snake_case :Optional[int]=12 , snake_case :int=3_072 , snake_case :Optional[int]="gelu_new" , snake_case :str=0.1 , snake_case :List[str]=0.1 , snake_case :int=4_096 , snake_case :List[Any]=2 , snake_case :Union[str, Any]=0.02 , snake_case :Union[str, Any]=1e-12 , snake_case :Dict=True , snake_case :Optional[int]=0 , snake_case :int=1 , snake_case :List[Any]=2 , snake_case :Dict=66 , snake_case :int="block_sparse" , snake_case :Union[str, Any]=True , snake_case :Any=False , snake_case :Union[str, Any]=64 , snake_case :Optional[Any]=3 , snake_case :Tuple=None , **snake_case :int , ):
'''simple docstring'''
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , sep_token_id=_A , **_A , )
A_ : str = vocab_size
A_ : Union[str, Any] = max_position_embeddings
A_ : List[Any] = hidden_size
A_ : str = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : int = intermediate_size
A_ : str = hidden_act
A_ : Optional[Any] = hidden_dropout_prob
A_ : str = attention_probs_dropout_prob
A_ : Tuple = initializer_range
A_ : Optional[int] = type_vocab_size
A_ : Tuple = layer_norm_eps
A_ : Union[str, Any] = use_cache
A_ : Optional[Any] = rescale_embeddings
A_ : str = attention_type
A_ : Dict = use_bias
A_ : Optional[int] = block_size
A_ : Any = num_random_blocks
A_ : int = classifier_dropout
class __magic_name__ ( _lowerCamelCase ):
"""simple docstring"""
@property
def SCREAMING_SNAKE_CASE ( self :List[str] ):
'''simple docstring'''
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
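    # For the default (non multiple-choice) task the export therefore declares two
    # inputs, each with dynamic batch and sequence axes:
    #   {"input_ids": {0: "batch", 1: "sequence"}, "attention_mask": {0: "batch", 1: "sequence"}}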
| 300
|
def bfs(graph, source, sink, parent):
    # Return True if sink is reachable in the residual graph, recording parents
    visited = [False] * len(graph)
    queue = []
    queue.append(source)
    visited[source] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[sink]
def ford_fulkerson(graph, source, sink):
    parent = [-1] * len(graph)
    max_flow = 0
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities of the edges and their reverse edges
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    return max_flow
graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]
source, sink = 0, 5
print(ford_fulkerson(graph, source, sink))
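# This is the classic CLRS flow network; the run above prints 23. Because the
# augmenting paths are found with BFS, this is the Edmonds-Karp variant, O(V * E^2).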
| 308
| 0
|
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def UpperCamelCase__ ( ):
snake_case : List[str] = ArgumentParser(
description=(
"PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
) )
# Optional arguments for the launch helper
parser.add_argument("--num_cores" , type=lowercase__ , default=1 , help="Number of TPU cores to use (1 or 8)." )
# positional
parser.add_argument(
"training_script" , type=lowercase__ , help=(
"The full path to the single TPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script"
) , )
# rest from the training program
parser.add_argument("training_script_args" , nargs=lowercase__ )
return parser.parse_args()
def UpperCamelCase__ ( ):
snake_case : Optional[Any] = parse_args()
# Import training_script as a module.
snake_case : Optional[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
snake_case : int = script_fpath.stem
snake_case : List[Any] = importlib.import_module(lowercase__ )
# Patch sys.argv
snake_case : str = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 148
|
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'vocab_file': 'vocab.txt'}
lowerCAmelCase_ = {
'vocab_file': {
'openbmb/cpm-ant-10b': 'https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt',
},
}
lowerCAmelCase_ = {
'openbmb/cpm-ant-10b': 10_24,
}
def load_vocab(vocab_file):
    """Loads a vocabulary file into a dictionary."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens):
        token = token.rstrip("\n")
        vocab[token] = index
    return vocab
class WordpieceTokenizer(object):
    def __init__(self, vocab, unk_token="<unk>", max_input_chars_per_word=200):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
    def tokenize(self, token):
        chars = list(token)
        if len(chars) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars):
            end = len(chars)
            cur_substr = None
            # greedy longest-match-first: shrink the window until a vocab entry is found
            while start < end:
                substr = "".join(chars[start:end])
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                sub_tokens.append(self.unk_token)
                start += 1
            else:
                sub_tokens.append(cur_substr)
                start = end
        return sub_tokens
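    # Greedy longest-match-first example: with vocab {"abc": 0, "ab": 1, "d": 2},
    # tokenize("abcd") first matches "abc" (the longest prefix in the vocab),
    # then "d", returning ["abc", "d"].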
class _A ( _lowerCamelCase ):
_UpperCamelCase : List[str] = VOCAB_FILES_NAMES
_UpperCamelCase : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
_UpperCamelCase : int = False
def __init__( self : List[str] , _A : int , _A : Optional[Any]="<d>" , _A : Any="</d>" , _A : Optional[Any]="<s>" , _A : Any="</s>" , _A : Any="<pad>" , _A : List[Any]="<unk>" , _A : Optional[Any]="</n>" , _A : List[str]="</_>" , _A : Optional[Any]="left" , **_A : str , ) -> Tuple:
"""simple docstring"""
requires_backends(self , ['''jieba'''] )
super().__init__(
bod_token=_A , eod_token=_A , bos_token=_A , eos_token=_A , pad_token=_A , unk_token=_A , line_token=_A , space_token=_A , padding_side=_A , **_A , )
lowercase : str = bod_token
lowercase : str = eod_token
lowercase : Any = load_vocab(_A )
lowercase : List[Any] = self.encoder[space_token]
lowercase : Tuple = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
lowercase : Any = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
lowercase : int = {v: k for k, v in self.encoder.items()}
lowercase : Optional[Any] = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __a ( self : Dict ) -> Optional[int]:
"""simple docstring"""
return self.encoder[self.bod_token]
@property
def __a ( self : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return self.encoder[self.eod_token]
@property
def __a ( self : List[str] ) -> List[str]:
"""simple docstring"""
return self.encoder["\n"]
@property
def __a ( self : List[Any] ) -> int:
"""simple docstring"""
return len(self.encoder )
def __a ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def __a ( self : str , _A : List[str] ) -> Tuple:
"""simple docstring"""
lowercase : int = []
for x in jieba.cut(_A , cut_all=_A ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(_A ) )
return output_tokens
def __a ( self : List[Any] , _A : Tuple , **_A : Optional[int] ) -> Any:
"""simple docstring"""
lowercase : List[str] = [i for i in token_ids if i >= 0]
lowercase : Any = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_A , **_A )
def __a ( self : List[Any] , _A : int ) -> Optional[Any]:
"""simple docstring"""
return token in self.encoder
def __a ( self : Dict , _A : List[str] ) -> str:
"""simple docstring"""
return "".join(_A )
def __a ( self : List[str] , _A : List[str] ) -> Any:
"""simple docstring"""
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def __a ( self : Tuple , _A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return self.decoder.get(_A , self.unk_token )
def __a ( self : List[Any] , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if os.path.isdir(_A ):
lowercase : str = os.path.join(
_A , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
else:
lowercase : Optional[int] = (filename_prefix + '''-''' if filename_prefix else '''''') + save_directory
lowercase : Any = 0
if " " in self.encoder:
lowercase : List[Any] = self.encoder[''' ''']
del self.encoder[" "]
if "\n" in self.encoder:
lowercase : Dict = self.encoder['''\n''']
del self.encoder["\n"]
lowercase : Union[str, Any] = collections.OrderedDict(sorted(self.encoder.items() , key=lambda _A : x[1] ) )
with open(_A , '''w''' , encoding='''utf-8''' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
''' Please check that the vocabulary is not corrupted!''' )
lowercase : Any = token_index
writer.write(token + '''\n''' )
index += 1
return (vocab_file,)
def __a ( self : str , _A : List[int] , _A : List[int] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __a ( self : int , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is not None:
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A ))
return [1] + ([0] * len(_A ))
| 308
| 0
|
"""simple docstring"""
from __future__ import annotations
from collections.abc import Iterator
class Node:
    def __init__(self, value) -> None:
        self.value = value
        self.left: Node | None = None
        self.right: Node | None = None
class BinaryTreeNodeSum:
    """Sums all node values of a binary tree via a recursive depth-first search."""
    def __init__(self, tree) -> None:
        self.tree = tree
    def depth_first_search(self, node) -> int:
        if node is None:
            return 0
        return node.value + (
            self.depth_first_search(node.left) + self.depth_first_search(node.right)
        )
    def __iter__(self) -> Iterator[int]:
        yield self.depth_first_search(self.tree)
if __name__ == "__main__":
import doctest
doctest.testmod()
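# Example: a root 10 with children 5 and -3 sums to 12:
#   root = Node(10)
#   root.left, root.right = Node(5), Node(-3)
#   assert next(iter(BinaryTreeNodeSum(root))) == 12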
| 96
|
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def snake_case( __magic_name__ , __magic_name__ , __magic_name__ ) -> Optional[Any]:
'''simple docstring'''
lowercase : int = 1.5
lowercase : int = int(factor * num_class_images )
lowercase : Any = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=__magic_name__ )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
lowercase : str = client.query(text=__magic_name__ )
if len(__magic_name__ ) >= factor * num_class_images or num_images > 1e4:
break
else:
lowercase : List[str] = int(factor * num_images )
lowercase : List[str] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=__magic_name__ , aesthetic_weight=0.1 , )
lowercase : Dict = 0
lowercase : Optional[Any] = 0
lowercase : List[Any] = tqdm(desc='''downloading real regularization images''' , total=__magic_name__ )
with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open(
F"""{class_data_dir}/images.txt""" , '''w''' ) as fa:
while total < num_class_images:
lowercase : int = class_images[count]
count += 1
try:
lowercase : int = requests.get(images['''url'''] )
if img.status_code == 2_00:
lowercase : List[Any] = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def snake_case( ) -> Optional[int]:
'''simple docstring'''
lowercase : List[str] = argparse.ArgumentParser('''''' , add_help=__magic_name__ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=__magic_name__ , type=__magic_name__ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=__magic_name__ )
return parser.parse_args()
if __name__ == "__main__":
lowerCAmelCase_ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
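# Example run, assuming this script is saved as retrieve.py (downloads class
# regularization images from the LAION index queried above):
#   python retrieve.py --class_prompt "photo of a dog" --class_data_dir dog_reg --num_class_images 200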
| 308
| 0
|
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 247
|
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """Parse the launch-helper command line options."""
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
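# Example launch (the training script name and its flags are hypothetical):
#
#   python xla_spawn.py --num_cores 8 run_glue.py --learning_rate 3e-5
#
# Everything after the positional script path is forwarded untouched, with
# --tpu_num_cores appended so the wrapped script can read the core count.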
"""simple docstring"""
from __future__ import annotations
graph = {
"""A""": ["""B""", """C""", """E"""],
"""B""": ["""A""", """D""", """E"""],
"""C""": ["""A""", """F""", """G"""],
"""D""": ["""B"""],
"""E""": ["""A""", """B""", """D"""],
"""F""": ["""C"""],
"""G""": ["""C"""],
}
class Graph:
    def __init__(self, graph: dict[str, list[str]], source_vertex: str) -> None:
        """Graph is implemented as a dictionary of adjacency lists; the source vertex is fixed at construction."""
        self.graph = graph
        # mapping node to its parent in resulting breadth first tree
        self.parent: dict[str, str | None] = {}
        self.source_vertex = source_vertex

    def breadth_first_search(self) -> None:
        """Run BFS from the source vertex, filling in the parent mapping."""
        visited = {self.source_vertex}
        self.parent[self.source_vertex] = None
        queue = [self.source_vertex]  # first in first out queue

        while queue:
            vertex = queue.pop(0)
            for adjacent_vertex in self.graph[vertex]:
                if adjacent_vertex not in visited:
                    visited.add(adjacent_vertex)
                    self.parent[adjacent_vertex] = vertex
                    queue.append(adjacent_vertex)

    def shortest_path(self, target_vertex: str) -> str:
        """Walk the parent mapping back to the source and render the path as 'A->B->C'."""
        if target_vertex == self.source_vertex:
            return self.source_vertex

        target_vertex_parent = self.parent.get(target_vertex)
        if target_vertex_parent is None:
            raise ValueError(
                f"No path from vertex: {self.source_vertex} to vertex: {target_vertex}"
            )

        return self.shortest_path(target_vertex_parent) + f"->{target_vertex}"


if __name__ == "__main__":
    g = Graph(graph, "G")
    g.breadth_first_search()
    print(g.shortest_path("D"))
    print(g.shortest_path("G"))
    print(g.shortest_path("Foo"))
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


def make_batched(videos) -> List[List[ImageInput]]:
    """Normalise the supported video input formats into a batch (list) of frame lists."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos

    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]

    elif is_valid_image(videos):
        return [[videos]]

    raise ValueError(f"Could not make batched video from {videos}")
class VideoMAEImageProcessor(BaseImageProcessor):
    # NOTE: the class name is reconstructed from context (a video-aware processor with
    # these exact defaults); the base class is the BaseImageProcessor imported above.
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" in size:
            output_size = get_resize_output_image_size(image, size["shortest_edge"], default_to_square=False)
        elif "height" in size and "width" in size:
            output_size = (size["height"], size["width"])
        else:
            raise ValueError(f"Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"Size must have 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
    ) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample)
        if do_center_crop:
            image = self.center_crop(image, size=crop_size)
        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor)
        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std)
        image = to_channel_dimension_format(image, data_format)
        return image

    def preprocess(
        self,
        videos: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        if not valid_images(videos):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        videos = make_batched(videos)

        videos = [
            [
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_center_crop=do_center_crop,
                    crop_size=crop_size,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    data_format=data_format,
                )
                for img in video
            ]
            for video in videos
        ]

        data = {"pixel_values": videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
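# Usage sketch (the class name above was reconstructed from context; the synthetic clip
# below exercises the 224x224 defaults):
#
#   import numpy as np
#   processor = VideoMAEImageProcessor()
#   video = [np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8) for _ in range(16)]
#   batch = processor(video, return_tensors="np")
#   batch["pixel_values"].shape  # expected (1, 16, 3, 224, 224)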
'''simple docstring'''
from collections.abc import Callable
from math import pi, sqrt
from random import uniform
from statistics import mean
def pi_estimator(iterations: int) -> None:
    """
    A Monte Carlo estimate of pi: draw points uniformly in a 2x2 square
    and count how many land inside the inscribed unit circle.
    """

    def is_in_circle(x: float, y: float) -> bool:
        distance_from_centre = sqrt((x**2) + (y**2))
        # Our circle has a radius of 1, so a distance
        # greater than 1 would land outside the circle.
        return distance_from_centre <= 1

    # The proportion of guesses that landed in the circle
    proportion = mean(
        int(is_in_circle(uniform(-1.0, 1.0), uniform(-1.0, 1.0)))
        for _ in range(iterations)
    )
    # The ratio of the area for circle to square is pi/4.
    pi_estimate = proportion * 4
    print(f"The estimated value of pi is {pi_estimate}")
    print(f"The known value of pi is {pi}")
    print(f"The total error is {abs(pi - pi_estimate)}")


def area_under_curve_estimator(
    iterations: int,
    function_to_integrate: Callable[[float], float],
    min_value: float = 0.0,
    max_value: float = 1.0,
) -> float:
    """Monte Carlo estimate of the integral of function_to_integrate over [min_value, max_value]."""
    return mean(
        function_to_integrate(uniform(min_value, max_value)) for _ in range(iterations)
    ) * (max_value - min_value)


def area_under_line_estimator_check(
    iterations: int, min_value: float = 0.0, max_value: float = 1.0
) -> None:
    """Check the estimator against the analytic area under y=x."""

    def identity_function(x: float) -> float:
        return x

    estimated_value = area_under_curve_estimator(
        iterations, identity_function, min_value, max_value
    )
    expected_value = (max_value * max_value - min_value * min_value) / 2

    print("******************")
    print(f"Estimating area under y=x where x varies from {min_value} to {max_value}")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {expected_value}")
    print(f"Total error is {abs(estimated_value - expected_value)}")
    print("******************")


def pi_estimator_using_area_under_curve(iterations: int) -> None:
    """The area under the quarter circle y = sqrt(4 - x^2) on [0, 2] equals pi."""

    def function_to_integrate(x: float) -> float:
        return sqrt(4.0 - x * x)

    estimated_value = area_under_curve_estimator(
        iterations, function_to_integrate, 0.0, 2.0
    )

    print("******************")
    print("Estimating pi using area_under_curve_estimator")
    print(f"Estimated value is {estimated_value}")
    print(f"Expected value is {pi}")
    print(f"Total error is {abs(estimated_value - pi)}")
    print("******************")
if __name__ == "__main__":
import doctest
doctest.testmod()
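# Example of reusing the generic estimator above for another integral; the exact output
# varies run to run, but with 100_000 samples it should land near the true value 1/3:
#
#   area_under_curve_estimator(100_000, lambda x: x**2)  # ~0.333, the integral of x^2 over [0, 1]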
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    @cached_property
    def resolver(self):
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        model_card, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    """simple docstring"""

    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    """simple docstring"""

    return_name = "generated"
def __init__(self , *lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
super().__init__(*_A , **_A )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == "tf"
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def UpperCamelCase_ (self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ , ):
"""simple docstring"""
a = {}
if truncation is not None:
a = truncation
a = generate_kwargs
a = {}
if return_tensors is not None and return_type is None:
a = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
a = return_type
if clean_up_tokenization_spaces is not None:
a = clean_up_tokenization_spaces
if stop_sequence is not None:
a = self.tokenizer.encode(_A , add_special_tokens=_A )
if len(_A ) > 1:
warnings.warn(
"Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
" the stop sequence will be used as the stop sequence string in the interim." )
a = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
return True
def UpperCamelCase_ (self , *lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
a = self.model.config.prefix if self.model.config.prefix is not None else ''''''
if isinstance(args[0] , _A ):
if self.tokenizer.pad_token_id is None:
raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input" )
a = ([prefix + arg for arg in args[0]],)
a = True
elif isinstance(args[0] , _A ):
a = (prefix + args[0],)
a = False
else:
raise ValueError(
F''' `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`''' )
a = self.tokenizer(*_A , padding=_A , truncation=_A , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__(self , *lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
a = super().__call__(*_A , **_A )
if (
isinstance(args[0] , _A )
and all(isinstance(_A , _A ) for el in args[0] )
and all(len(_A ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=TruncationStrategy.DO_NOT_TRUNCATE , **lowerCamelCase_ ):
"""simple docstring"""
a = self._parse_and_tokenize(_A , truncation=_A , **_A )
return inputs
def UpperCamelCase_ (self , lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
if self.framework == "pt":
a = model_inputs['''input_ids'''].shape
elif self.framework == "tf":
a = tf.shape(model_inputs["input_ids"] ).numpy()
a = generate_kwargs.get("min_length" , self.model.config.min_length )
a = generate_kwargs.get("max_length" , self.model.config.max_length )
self.check_inputs(_A , generate_kwargs["min_length"] , generate_kwargs["max_length"] )
a = self.model.generate(**_A , **_A )
a = output_ids.shape[0]
if self.framework == "pt":
a = output_ids.reshape(_A , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
a = tf.reshape(_A , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=ReturnType.TEXT , lowerCamelCase_=False ):
"""simple docstring"""
a = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
a = {F'''{self.return_name}_token_ids''': output_ids}
elif return_type == ReturnType.TEXT:
a = {
F'''{self.return_name}_text''': self.tokenizer.decode(
_A , skip_special_tokens=_A , clean_up_tokenization_spaces=_A , )
}
records.append(_A )
return records
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    """simple docstring"""

    return_name = "summary"
def __call__(self , *lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
return super().__call__(*_A , **_A )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if max_length < min_length:
logger.warning(F'''Your min_length={min_length} must be inferior than your max_length={max_length}.''' )
if input_length < max_length:
logger.warning(
F'''Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is '''
"a summarization task, where outputs shorter than the input are typically wanted, you might "
F'''consider decreasing max_length manually, e.g. summarizer(\'...\', max_length={input_length//2})''' )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    """simple docstring"""

    return_name = "translation"
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
if input_length > 0.9 * max_length:
logger.warning(
F'''Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider '''
"increasing your max_length manually, e.g. translator(\'...\', max_length=400)" )
return True
def UpperCamelCase_ (self , *lowerCamelCase_ , lowerCamelCase_=TruncationStrategy.DO_NOT_TRUNCATE , lowerCamelCase_=None , lowerCamelCase_=None ):
"""simple docstring"""
if getattr(self.tokenizer , "_build_translation_inputs" , _A ):
return self.tokenizer._build_translation_inputs(
*_A , return_tensors=self.framework , truncation=_A , src_lang=_A , tgt_lang=_A )
else:
return super()._parse_and_tokenize(*_A , truncation=_A )
def UpperCamelCase_ (self , lowerCamelCase_=None , lowerCamelCase_=None , **lowerCamelCase_ ):
"""simple docstring"""
a = super()._sanitize_parameters(**_A )
if src_lang is not None:
a = src_lang
if tgt_lang is not None:
a = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
a = kwargs.get("task" , self.task )
a = task.split("_" )
if task and len(_A ) == 4:
# translation, XX, to YY
a = items[1]
a = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__(self , *lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
return super().__call__(*_A , **_A )
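# Hedged usage sketch: these pipelines are normally constructed through the pipeline()
# factory rather than directly (the default checkpoints it picks are not specified here):
#
#   from transformers import pipeline
#   summarizer = pipeline("summarization")          # -> SummarizationPipeline
#   summarizer("A very long article ...", max_length=60)
#   translator = pipeline("translation_en_to_fr")   # task string parsed into src/tgt langs by TranslationPipeline
#   translator("How old are you?")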
from __future__ import annotations
from typing import Any
def generate_all_subsequences(sequence: list[Any]) -> None:
    create_state_space_tree(sequence, [], 0)


def create_state_space_tree(sequence: list[Any], current_subsequence: list[Any], index: int) -> None:
    """
    Creates a state space tree and traverses it depth-first: at each index the element
    is first excluded, then included, so every subsequence is printed exactly once.
    """
    if index == len(sequence):
        print(current_subsequence)
        return

    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.append(sequence[index])
    create_state_space_tree(sequence, current_subsequence, index + 1)
    current_subsequence.pop()


if __name__ == "__main__":
    seq: list[Any] = [3, 1, 2, 4]
    generate_all_subsequences(seq)

    seq.clear()
    seq.extend(["A", "B", "C"])
    generate_all_subsequences(seq)
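# Trace of the recursion for a two-element sequence (the exclude branch runs first, so
# the empty subsequence is printed before any element appears):
#
#   generate_all_subsequences([1, 2]) prints, in order: [], [2], [1], [1, 2]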
from random import shuffle
import tensorflow as tf
from numpy import array
def tf_k_means_cluster(vectors, noofclusters):
    noofclusters = int(noofclusters)
    assert noofclusters < len(vectors)

    # Find out the dimensionality
    dim = len(vectors[0])

    # Will help select random centroids from among the available vectors
    vector_indices = list(range(len(vectors)))
    shuffle(vector_indices)

    # GRAPH OF COMPUTATION
    # We initialize a new graph and set it as the default during each run
    # of this algorithm. This ensures that as this function is called
    # multiple times, the default graph doesn't keep getting crowded with
    # unused ops and Variables from previous function calls.
    graph = tf.Graph()

    with graph.as_default():
        # SESSION OF COMPUTATION
        sess = tf.Session()

        ##CONSTRUCTING THE ELEMENTS OF COMPUTATION
        ##First lets ensure we have a Variable vector for each centroid,
        ##initialized to one of the vectors from the available data points
        centroids = [
            tf.Variable(vectors[vector_indices[i]]) for i in range(noofclusters)
        ]
        ##These nodes will assign the centroid Variables the appropriate
        ##values
        centroid_value = tf.placeholder("float64", [dim])
        cent_assigns = []
        for centroid in centroids:
            cent_assigns.append(tf.assign(centroid, centroid_value))

        ##Variables for cluster assignments of individual vectors(initialized
        ##to 0 at first)
        assignments = [tf.Variable(0) for i in range(len(vectors))]
        ##These nodes will assign an assignment Variable the appropriate
        ##value
        assignment_value = tf.placeholder("int32")
        cluster_assigns = []
        for assignment in assignments:
            cluster_assigns.append(tf.assign(assignment, assignment_value))

        ##Now lets construct the node that will compute the mean
        # The placeholder for the input
        mean_input = tf.placeholder("float", [None, dim])
        # The Node/op takes the input and computes a mean along the 0th
        # dimension, i.e. the list of input vectors
        mean_op = tf.reduce_mean(mean_input, 0)

        ##Node for computing Euclidean distances
        # Placeholders for input
        v1 = tf.placeholder("float", [dim])
        v2 = tf.placeholder("float", [dim])
        # tf.subtract is the TF 1.x name for the old tf.sub op
        euclid_dist = tf.sqrt(tf.reduce_sum(tf.pow(tf.subtract(v1, v2), 2)))

        ##This node will figure out which cluster to assign a vector to,
        ##based on Euclidean distances of the vector from the centroids.
        # Placeholder for input
        centroid_distances = tf.placeholder("float", [noofclusters])
        cluster_assignment = tf.argmin(centroid_distances, 0)

        ##INITIALIZING STATE VARIABLES
        ##This will help initialization of all Variables defined with respect
        ##to the graph. The Variable-initializer should be defined after
        ##all the Variables have been constructed, so that each of them
        ##will be included in the initialization.
        init_op = tf.initialize_all_variables()

        # Initialize all variables
        sess.run(init_op)

        ##CLUSTERING ITERATIONS
        # Now perform the Expectation-Maximization steps of K-Means clustering
        # iterations. To keep things simple, we will only do a set number of
        # iterations, instead of using a Stopping Criterion.
        noofiterations = 100
        for _ in range(noofiterations):
            ##EXPECTATION STEP
            ##Based on the centroid locations till last iteration, compute
            ##the _expected_ centroid assignments.
            # Iterate over each vector
            for vector_n in range(len(vectors)):
                vect = vectors[vector_n]
                # Compute Euclidean distance between this vector and each
                # centroid. Remember that this list cannot be named
                #'centroid_distances', since that is the input to the
                # cluster assignment node.
                distances = [
                    sess.run(euclid_dist, feed_dict={v1: vect, v2: sess.run(centroid)})
                    for centroid in centroids
                ]
                # Now use the cluster assignment node, with the distances
                # as the input
                assignment = sess.run(
                    cluster_assignment, feed_dict={centroid_distances: distances}
                )
                # Now assign the value to the appropriate state variable
                sess.run(
                    cluster_assigns[vector_n], feed_dict={assignment_value: assignment}
                )

            ##MAXIMIZATION STEP
            # Based on the expected state computed from the Expectation Step,
            # compute the locations of the centroids so as to maximize the
            # overall objective of minimizing within-cluster Sum-of-Squares
            for cluster_n in range(noofclusters):
                # Collect all the vectors assigned to this cluster
                assigned_vects = [
                    vectors[i]
                    for i in range(len(vectors))
                    if sess.run(assignments[i]) == cluster_n
                ]
                # Compute new centroid location
                new_location = sess.run(
                    mean_op, feed_dict={mean_input: array(assigned_vects)}
                )
                # Assign value to appropriate variable
                sess.run(
                    cent_assigns[cluster_n], feed_dict={centroid_value: new_location}
                )

        # Return centroids and assignments
        centroids = sess.run(centroids)
        assignments = sess.run(assignments)
        return centroids, assignments
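# Usage sketch (TensorFlow 1.x only, since the code relies on tf.Session and
# tf.placeholder; the data below is synthetic):
#
#   import numpy as np
#   data = np.random.rand(100, 2).astype("float64")
#   centroids, assignments = tf_k_means_cluster(data, noofclusters=3)
#   # centroids: 3 cluster centres; assignments: one cluster index per input vector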
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    # NOTE: the class name is reconstructed from context (log-mel features, 30 s chunks,
    # 80 mel bins); the base class is the SequenceFeatureExtractor imported above.
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16_000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8_000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"""The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"""
                    f""" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"""
                    f""" was sampled with {self.sampling_rate} and not {sampling_rate}.""")
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug.")

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"""Only mono-channel audio is supported for input to {self}""")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
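# Usage sketch (one second of silence at the 16 kHz default; the shapes follow from the
# 80 mel bins and 30 s padded window configured above):
#
#   import numpy as np
#   fe = WhisperFeatureExtractor()
#   feats = fe(np.zeros(16_000, dtype=np.float32), sampling_rate=16_000, return_tensors="np")
#   feats["input_features"].shape  # expected (1, 80, 3000)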
from math import pi, sqrt, tan
def surface_area_cube(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("surface_area_cube() only accepts non-negative values")
    return 6 * side_length**2


def surface_area_cuboid(length: float, breadth: float, height: float) -> float:
    if length < 0 or breadth < 0 or height < 0:
        raise ValueError("surface_area_cuboid() only accepts non-negative values")
    return 2 * ((length * breadth) + (breadth * height) + (length * height))


def surface_area_sphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_sphere() only accepts non-negative values")
    return 4 * pi * radius**2


def surface_area_hemisphere(radius: float) -> float:
    if radius < 0:
        raise ValueError("surface_area_hemisphere() only accepts non-negative values")
    return 3 * pi * radius**2


def surface_area_cone(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cone() only accepts non-negative values")
    return pi * radius * (radius + (height**2 + radius**2) ** 0.5)


def surface_area_conical_frustum(radius_1: float, radius_2: float, height: float) -> float:
    if radius_1 < 0 or radius_2 < 0 or height < 0:
        raise ValueError(
            "surface_area_conical_frustum() only accepts non-negative values")
    slant_height = (height**2 + (radius_1 - radius_2) ** 2) ** 0.5
    return pi * ((slant_height * (radius_1 + radius_2)) + radius_1**2 + radius_2**2)


def surface_area_cylinder(radius: float, height: float) -> float:
    if radius < 0 or height < 0:
        raise ValueError("surface_area_cylinder() only accepts non-negative values")
    return 2 * pi * radius * (height + radius)


def surface_area_torus(torus_radius: float, tube_radius: float) -> float:
    if torus_radius < 0 or tube_radius < 0:
        raise ValueError("surface_area_torus() only accepts non-negative values")
    if torus_radius < tube_radius:
        raise ValueError(
            "surface_area_torus() does not support spindle or self intersecting tori")
    return 4 * pow(pi, 2) * torus_radius * tube_radius


def area_rectangle(length: float, width: float) -> float:
    if length < 0 or width < 0:
        raise ValueError("area_rectangle() only accepts non-negative values")
    return length * width


def area_square(side_length: float) -> float:
    if side_length < 0:
        raise ValueError("area_square() only accepts non-negative values")
    return side_length**2


def area_triangle(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_triangle() only accepts non-negative values")
    return (base * height) / 2


def area_triangle_three_sides(side1: float, side2: float, side3: float) -> float:
    """Heron's formula, after checking the sides actually form a triangle."""
    if side1 < 0 or side2 < 0 or side3 < 0:
        raise ValueError("area_triangle_three_sides() only accepts non-negative values")
    elif side1 + side2 < side3 or side1 + side3 < side2 or side2 + side3 < side1:
        raise ValueError("Given three sides do not form a triangle")
    semi_perimeter = (side1 + side2 + side3) / 2
    area = sqrt(
        semi_perimeter
        * (semi_perimeter - side1)
        * (semi_perimeter - side2)
        * (semi_perimeter - side3))
    return area


def area_parallelogram(base: float, height: float) -> float:
    if base < 0 or height < 0:
        raise ValueError("area_parallelogram() only accepts non-negative values")
    return base * height


def area_trapezium(base_1: float, base_2: float, height: float) -> float:
    if base_1 < 0 or base_2 < 0 or height < 0:
        raise ValueError("area_trapezium() only accepts non-negative values")
    return 1 / 2 * (base_1 + base_2) * height


def area_circle(radius: float) -> float:
    if radius < 0:
        raise ValueError("area_circle() only accepts non-negative values")
    return pi * radius**2


def area_ellipse(radius_x: float, radius_y: float) -> float:
    if radius_x < 0 or radius_y < 0:
        raise ValueError("area_ellipse() only accepts non-negative values")
    return pi * radius_x * radius_y


def area_rhombus(diagonal_1: float, diagonal_2: float) -> float:
    if diagonal_1 < 0 or diagonal_2 < 0:
        raise ValueError("area_rhombus() only accepts non-negative values")
    return 1 / 2 * diagonal_1 * diagonal_2


def area_reg_polygon(sides: int, length: float) -> float:
    if not isinstance(sides, int) or sides < 3:
        raise ValueError(
            "area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides")
    elif length < 0:
        raise ValueError(
            "area_reg_polygon() only accepts non-negative values as \
length of a side")
    return (sides * length**2) / (4 * tan(pi / sides))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('[DEMO] Areas of various geometric shapes: \n')
print(f"Rectangle: {area_rectangle(1_0, 2_0) = }")
print(f"Square: {area_square(1_0) = }")
print(f"Triangle: {area_triangle(1_0, 1_0) = }")
print(f"Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }")
print(f"Parallelogram: {area_parallelogram(1_0, 2_0) = }")
print(f"Rhombus: {area_rhombus(1_0, 2_0) = }")
print(f"Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }")
print(f"Circle: {area_circle(2_0) = }")
print(f"Ellipse: {area_ellipse(1_0, 2_0) = }")
print('\nSurface Areas of various geometric shapes: \n')
print(f"Cube: {surface_area_cube(2_0) = }")
print(f"Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }")
print(f"Sphere: {surface_area_sphere(2_0) = }")
print(f"Hemisphere: {surface_area_hemisphere(2_0) = }")
print(f"Cone: {surface_area_cone(1_0, 2_0) = }")
print(f"Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }")
print(f"Cylinder: {surface_area_cylinder(1_0, 2_0) = }")
print(f"Torus: {surface_area_torus(2_0, 1_0) = }")
print(f"Equilateral Triangle: {area_reg_polygon(3, 1_0) = }")
print(f"Square: {area_reg_polygon(4, 1_0) = }")
print(f"Reqular Pentagon: {area_reg_polygon(5, 1_0) = }")
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
def __init__( self : int , _A : Optional[int] , _A : Any=13 , _A : List[Any]=7 , _A : List[Any]=True , _A : Optional[Any]=True , _A : str=True , _A : Any=True , _A : Dict=True , _A : Optional[Any]=False , _A : Any=False , _A : List[str]=False , _A : Optional[int]=2 , _A : List[Any]=99 , _A : str=0 , _A : Dict=32 , _A : Dict=5 , _A : List[Any]=4 , _A : Optional[Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[Any]=512 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Optional[int]=2 , _A : Tuple=4 , _A : List[Any]="last" , _A : List[str]=True , _A : Tuple=None , _A : Optional[Any]=0 , ) -> Any:
"""simple docstring"""
lowercase : str = parent
lowercase : Optional[Any] = batch_size
lowercase : Union[str, Any] = seq_length
lowercase : str = is_training
lowercase : str = use_input_lengths
lowercase : List[Any] = use_token_type_ids
lowercase : Union[str, Any] = use_labels
lowercase : Tuple = gelu_activation
lowercase : Dict = sinusoidal_embeddings
lowercase : Any = causal
lowercase : str = asm
lowercase : Optional[Any] = n_langs
lowercase : Dict = vocab_size
lowercase : Dict = n_special
lowercase : List[Any] = hidden_size
lowercase : str = num_hidden_layers
lowercase : int = num_attention_heads
lowercase : str = hidden_dropout_prob
lowercase : Dict = attention_probs_dropout_prob
lowercase : List[Any] = max_position_embeddings
lowercase : Optional[int] = type_sequence_label_size
lowercase : List[str] = initializer_range
lowercase : List[str] = num_labels
lowercase : int = num_choices
lowercase : int = summary_type
lowercase : Tuple = use_proj
lowercase : Union[str, Any] = scope
lowercase : List[str] = bos_token_id
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowercase : str = None
if self.use_input_lengths:
lowercase : int = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
lowercase : Union[str, Any] = None
if self.use_token_type_ids:
lowercase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
lowercase : Union[str, Any] = None
lowercase : List[str] = None
lowercase : Optional[Any] = None
if self.use_labels:
lowercase : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowercase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowercase : Tuple = ids_tensor([self.batch_size] , 2 ).float()
lowercase : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowercase : List[Any] = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
return XLMConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , num_labels=self.num_labels , bos_token_id=self.bos_token_id , )
def __a ( self : int , _A : str , _A : Optional[Any] , _A : int , _A : List[str] , _A : Any , _A : Dict , _A : Tuple , _A : Union[str, Any] , _A : Tuple , ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = XLMModel(config=_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , lengths=_A , langs=_A )
lowercase : Dict = model(_A , langs=_A )
lowercase : int = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : int , _A : Dict , _A : int , _A : int , _A : Union[str, Any] , _A : Tuple , _A : Union[str, Any] , _A : Any , _A : Union[str, Any] , _A : Dict , ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel(_A )
model.to(_A )
model.eval()
lowercase : Tuple = model(_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __a ( self : Union[str, Any] , _A : List[str] , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[Any] , _A : int , _A : Union[str, Any] , _A : Tuple , _A : int , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Dict = XLMForQuestionAnsweringSimple(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Any = model(_A , start_positions=_A , end_positions=_A )
lowercase : Any = outputs
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __a ( self : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : List[Any] , _A : Union[str, Any] , _A : List[str] , _A : Any , _A : Any , _A : str , _A : Union[str, Any] , ) -> Dict:
"""simple docstring"""
lowercase : Optional[int] = XLMForQuestionAnswering(_A )
model.to(_A )
model.eval()
lowercase : Any = model(_A )
lowercase : Tuple = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , p_mask=_A , )
lowercase : Optional[int] = model(
_A , start_positions=_A , end_positions=_A , cls_index=_A , is_impossible=_A , )
((lowercase) , ) : Optional[int] = result_with_labels.to_tuple()
lowercase : List[str] = model(_A , start_positions=_A , end_positions=_A )
((lowercase) , ) : Any = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __a ( self : Union[str, Any] , _A : Optional[int] , _A : Dict , _A : int , _A : List[Any] , _A : List[str] , _A : Optional[Any] , _A : Dict , _A : Optional[int] , _A : str , ) -> int:
"""simple docstring"""
lowercase : List[str] = XLMForSequenceClassification(_A )
model.to(_A )
model.eval()
lowercase : List[str] = model(_A )
lowercase : Union[str, Any] = model(_A , labels=_A )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __a ( self : Union[str, Any] , _A : str , _A : int , _A : List[str] , _A : Optional[int] , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Any , _A : Tuple , ) -> Dict:
"""simple docstring"""
lowercase : Optional[Any] = self.num_labels
lowercase : Tuple = XLMForTokenClassification(_A )
model.to(_A )
model.eval()
lowercase : str = model(_A , attention_mask=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __a ( self : List[Any] , _A : List[str] , _A : Dict , _A : str , _A : List[str] , _A : List[str] , _A : Union[str, Any] , _A : Tuple , _A : Any , _A : Any , ) -> Union[str, Any]:
"""simple docstring"""
lowercase : int = self.num_choices
lowercase : List[Any] = XLMForMultipleChoice(config=_A )
model.to(_A )
model.eval()
lowercase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : List[str] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowercase : Dict = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __a ( self : Optional[Any] ) -> List[Any]:
"""simple docstring"""
lowercase : List[Any] = self.prepare_config_and_inputs()
(
(
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) , (
lowercase
) ,
) : Union[str, Any] = config_and_inputs
lowercase : Optional[int] = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''lengths''': input_lengths}
return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
_UpperCamelCase : Any = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_UpperCamelCase : str = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_UpperCamelCase : Tuple = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def __a ( self : List[Any] , _A : Tuple , _A : List[str] , _A : Dict , _A : Union[str, Any] , _A : Optional[Any] ) -> List[Any]:
"""simple docstring"""
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __a ( self : Dict , _A : Tuple , _A : List[str] , _A : int=False ) -> Optional[Any]:
"""simple docstring"""
lowercase : List[str] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
lowercase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
lowercase : str = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def __a ( self : Any ) -> List[str]:
"""simple docstring"""
lowercase : List[str] = XLMModelTester(self )
lowercase : Any = ConfigTester(self , config_class=_A , emb_dim=37 )
def __a ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
def __a ( self : Tuple ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*_A )
def __a ( self : Any ) -> Dict:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*_A )
def __a ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
lowercase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*_A )
def __a ( self : Union[str, Any] ) -> Tuple:
"""simple docstring"""
lowercase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*_A )
def __a ( self : List[str] ) -> Union[str, Any]:
"""simple docstring"""
lowercase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*_A )
def __a ( self : Dict ) -> int:
"""simple docstring"""
lowercase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*_A )
def __a ( self : Any ) -> List[Any]:
"""simple docstring"""
lowercase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*_A )
def __a ( self : int , _A : Union[str, Any] , _A : int , _A : Union[str, Any] , _A : Optional[Any] , _A : List[Any] , _A : List[Any]=False , _A : Optional[int]=1 ) -> Any:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_attentions in attentions] , [True] * len(_A ) )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(_A ):
# adds PAD dummy token
lowercase : List[Any] = min_length + idx + 1
lowercase : str = min_length + idx + 1
lowercase : Any = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] , [expected_shape] * len(_A ) )
def __a ( self : int , _A : Optional[int] , _A : Dict , _A : Any , _A : List[str] , _A : Optional[int] , _A : List[Any]=False , _A : List[Any]=1 ) -> str:
"""simple docstring"""
self.assertIsInstance(_A , _A )
self.assertListEqual(
[isinstance(_A , _A ) for iter_hidden_states in hidden_states] , [True] * len(_A ) , )
self.assertEqual(len(_A ) , (max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(_A ):
# adds PAD dummy token
lowercase : Union[str, Any] = min_length + idx + 1
lowercase : Optional[Any] = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] , [expected_shape] * len(_A ) , )
pass
@slow
def __a ( self : Optional[int] ) -> Any:
"""simple docstring"""
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase : Any = XLMModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
@slow
def __a ( self : Any ) -> Optional[Any]:
"""simple docstring"""
lowercase : Optional[int] = XLMWithLMHeadModel.from_pretrained('''xlm-mlm-en-2048''' )
model.to(_A )
lowercase : str = torch.tensor([[14, 447]] , dtype=torch.long , device=_A ) # the president
lowercase : List[str] = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
lowercase : Dict = model.generate(_A , do_sample=_A )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() , _A )
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
    BigBirdTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"google/bigbird-roberta-base": "https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model",
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"
),
},
"tokenizer_file": {
"google/bigbird-roberta-base": (
"https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json"
),
"google/bigbird-roberta-large": (
"https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json"
),
"google/bigbird-base-trivia-itc": (
"https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"google/bigbird-roberta-base": 4_0_9_6,
"google/bigbird-roberta-large": 4_0_9_6,
"google/bigbird-base-trivia-itc": 4_0_9_6,
}
A: Tuple = "▁"
class SCREAMING_SNAKE_CASE__ ( _lowerCamelCase ):
__lowerCAmelCase : Dict = VOCAB_FILES_NAMES
__lowerCAmelCase : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : List[Any] = BigBirdTokenizer
__lowerCAmelCase : List[Any] = ['''input_ids''', '''attention_mask''']
__lowerCAmelCase : List[int] = []
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[MASK]" , _SCREAMING_SNAKE_CASE="[CLS]" , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase : int = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else bos_token
UpperCAmelCase : List[Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else eos_token
UpperCAmelCase : List[Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else unk_token
UpperCAmelCase : int = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else pad_token
UpperCAmelCase : List[str] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else cls_token
UpperCAmelCase : int = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
UpperCAmelCase : str = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
super().__init__(
_A , tokenizer_file=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , **_A , )
UpperCAmelCase : Union[str, Any] = vocab_file
UpperCAmelCase : Optional[Any] = False if not self.vocab_file else True
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : List[str] = [self.sep_token_id]
UpperCAmelCase : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1] + ([0] * len(_A )) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """Create token type IDs: 0 for the first sequence (and its specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        """Copy the slow tokenizer's SentencePiece vocabulary file into `save_directory`."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
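
# Usage sketch (illustrative, not part of the original module); assumes
# `transformers` is installed and the checkpoint is reachable:
#
#     tok = BigBirdTokenizerFast.from_pretrained("google/bigbird-roberta-base")
#     ids = tok("Paris is the capital of France.")["input_ids"]
#     mask = tok.get_special_tokens_mask(ids, already_has_special_tokens=True)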
| 109
|
def solution(length: int = 50) -> int:
    """Count the ways a row of `length` units can be filled with gaps and
    tiles of length two, three, or four (a Project Euler 117 style recurrence)."""
    ways_number = [1] * (length + 1)
    for row_length in range(length + 1):
        for tile_length in range(2, 5):
            for tile_start in range(row_length - tile_length + 1):
                ways_number[row_length] += ways_number[
                    row_length - tile_start - tile_length
                ]
    return ways_number[length]
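
# Sanity check (illustrative): the DP reproduces the tetranacci-style relation
# f(n) = f(n-1) + f(n-2) + f(n-3) + f(n-4) with f(0) = f(1) = 1, so
# f(2) = 2, f(3) = 4, f(4) = 8 and solution(5) == 15.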
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 12
|
import os
def snake_case( __magic_name__ = "input.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(__magic_name__ ) , __magic_name__ ) ) as input_file:
lowercase : Any = [
[int(__magic_name__ ) for element in line.split(''',''' )]
for line in input_file.readlines()
]
lowercase : List[Any] = len(__magic_name__ )
lowercase : Any = len(matrix[0] )
lowercase : Tuple = [[-1 for _ in range(__magic_name__ )] for _ in range(__magic_name__ )]
for i in range(__magic_name__ ):
lowercase : str = matrix[i][0]
for j in range(1 , __magic_name__ ):
for i in range(__magic_name__ ):
lowercase : Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , __magic_name__ ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
lowercase : Any = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
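
# Worked example (illustrative, per the Project Euler 82 statement): on its 5x5
# example matrix the minimal up/down/right path is 201 + 96 + 342 + 234 + 103 + 18
# = 994, which this DP returns for that input.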
if __name__ == "__main__":
print(f'''{solution() = }''')
| 308
| 0
|
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse("1.11")


def onnx_export(model, model_args: tuple, output_path: Path, ordered_input_names, output_names, dynamic_axes, opset, use_external_data_format=False) -> None:
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, use_external_data_format=use_external_data_format, enable_onnx_checker=True, opset_version=opset, )
    else:
        export(
            model, model_args, f=output_path.as_posix(), input_names=ordered_input_names, output_names=output_names, dynamic_axes=dynamic_axes, do_constant_folding=True, opset_version=opset, )
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False) -> None:
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)
    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt", padding="max_length", max_length=pipeline.tokenizer.model_max_length, truncation=True, return_tensors="pt", )
    onnx_export(
        pipeline.text_encoder, model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)), output_path=output_path / "text_encoder" / "model.onnx", ordered_input_names=["input_ids"], output_names=["last_hidden_state", "pooler_output"], dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        }, opset=opset, )
    del pipeline.text_encoder
    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet, model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ), output_path=unet_path, ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"], output_names=["out_sample"], dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        }, opset=opset, use_external_data_format=True, )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet, unet_model_path, save_as_external_data=True, all_tensors_to_one_file=True, location="weights.pb", convert_attribute=False, )
    del pipeline.unet
    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder, model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_encoder" / "model.onnx", ordered_input_names=["sample", "return_dict"], output_names=["latent_sample"], dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset, )
    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder, model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ), output_path=output_path / "vae_decoder" / "model.onnx", ordered_input_names=["latent_sample", "return_dict"], output_names=["sample"], dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        }, opset=opset, )
    del pipeline.vae
    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker, model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ), output_path=output_path / "safety_checker" / "model.onnx", ordered_input_names=["clip_input", "images"], output_names=["out_images", "has_nsfw_concepts"], dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            }, opset=opset, )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None
    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"), vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"), text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"), tokenizer=pipeline.tokenizer, unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"), scheduler=pipeline.scheduler, safety_checker=safety_checker, feature_extractor=feature_extractor, requires_safety_checker=safety_checker is not None, )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)
    del pipeline
    del onnx_pipeline
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
    args = parser.parse_args()
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
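
# Example invocation (illustrative; the script filename and model id are
# assumptions, substitute your own checkpoint):
#
#   python convert_stable_diffusion_checkpoint_to_onnx.py \
#       --model_path runwayml/stable-diffusion-v1-5 \
#       --output_path ./sd_onnx --opset 14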
| 300
|
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import tensorflow as tf
    from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM
@require_tf
@require_sentencepiece
@require_tokenizers
class TFMT5ModelIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self) -> None:
        """Check the mT5-small loss on a fixed input against a reference score."""
        model = TFAutoModelForSeq2SeqLM.from_pretrained("google/mt5-small")
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="tf").input_ids
        labels = tokenizer("Hi I am", return_tensors="tf").input_ids
        loss = model(input_ids, labels=labels).loss
        mtf_score = -tf.math.reduce_mean(loss).numpy()
        EXPECTED_SCORE = -21.228168
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 2e-4)
| 308
| 0
|
"""Naive 0/1 knapsack solved by plain recursion (exponential time)."""


def knapsack(values: list, weights: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Return the maximum value that fits in a knapsack of capacity `max_weight`.

    >>> knapsack([60, 100, 120], [10, 20, 30], 3, 50, 0)
    220
    """
    if index == number_of_items:
        return 0
    ans1 = knapsack(values, weights, number_of_items, max_weight, index + 1)
    ans2 = 0
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            values, weights, number_of_items, max_weight - weights[index], index + 1 )
    return max(ans1, ans2)
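

# Memoized variant (an illustrative sketch, not in the original file): caching on
# (index, capacity) drops the exponential recursion to O(n * max_weight) states.
from functools import lru_cache


def knapsack_memoized(values: list, weights: list, max_weight: int) -> int:
    @lru_cache(maxsize=None)
    def best(index: int, capacity: int) -> int:
        if index == len(values):
            return 0
        skip = best(index + 1, capacity)
        if weights[index] <= capacity:
            return max(skip, values[index] + best(index + 1, capacity - weights[index]))
        return skip

    return best(0, max_weight)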
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
|
"""A* and bidirectional A* search on a 2D grid with obstacles."""
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right

TPosition = tuple[int, int]
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost

    def calculate_heuristic(self) -> float:
        dy = self.pos_x - self.goal_x
        dx = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)

    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ) )
        return successors

    def retrace_path(self, node: Node | None) -> list[TPosition]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPosition, goal: TPosition):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False

    def search(self) -> list[TPosition]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node )
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node) )
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPosition]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
UpperCamelCase_ = (0, 0)
UpperCamelCase_ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
UpperCamelCase_ = time.time()
UpperCamelCase_ = AStar(init, goal)
UpperCamelCase_ = a_star.search()
UpperCamelCase_ = time.time() - start_time
print(F'AStar execution time = {end_time:f} seconds')
UpperCamelCase_ = time.time()
UpperCamelCase_ = BidirectionalAStar(init, goal)
UpperCamelCase_ = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
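
# Note on the heuristic (explanatory, not part of the original file): with
# HEURISTIC = 1 the Manhattan distance is used, which is admissible on this
# 4-connected grid; the Euclidean default never overestimates either, so both
# choices keep A* optimal here.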
| 309
| 1
|
"""Helper datasets, models, and dataloaders used by the Accelerate test suite."""
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class RegressionDataset:
    def __init__(self, a=2, b=3, length=64, seed=None):
        rng = np.random.default_rng(seed)
        self.length = length
        self.x = rng.normal(size=(length,)).astype(np.float32)
        self.y = a * self.x + b + rng.normal(scale=0.1, size=(length,)).astype(np.float32)

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return {"x": self.x[i], "y": self.y[i]}
class RegressionModel4XPU(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.b = torch.nn.Parameter(torch.tensor([2, 3]).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a[0] + self.b[0]
class RegressionModel(torch.nn.Module):
    def __init__(self, a=0, b=0, double_output=False):
        super().__init__()
        self.a = torch.nn.Parameter(torch.tensor(a).float())
        self.b = torch.nn.Parameter(torch.tensor(b).float())
        self.first_batch = True

    def forward(self, x=None):
        if self.first_batch:
            print(f"Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}")
            self.first_batch = False
        return x * self.a + self.b
def mocked_dataloaders(accelerator, batch_size: int = 16):
    from datasets import load_dataset
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    data_files = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
    datasets = load_dataset("csv", data_files=data_files)
    label_list = datasets["train"].unique("label")
    label_to_id = {v: i for i, v in enumerate(label_list)}

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(
            examples["sentence1"], examples["sentence2"], truncation=True, max_length=None, padding="max_length" )
        if "label" in examples:
            outputs["labels"] = [label_to_id[l] for l in examples["label"]]
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["sentence1", "sentence2", "label"], )

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=2)
    eval_dataloader = DataLoader(tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=1)
    return train_dataloader, eval_dataloader
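

# Minimal usage sketch (illustrative, not part of the original helpers): run one
# batch of the synthetic regression data through RegressionModel.
if __name__ == "__main__":
    dataset = RegressionDataset(length=8, seed=42)
    loader = DataLoader(dataset, batch_size=4)
    model = RegressionModel(a=2, b=3)
    batch = next(iter(loader))
    print(model(batch["x"]))  # prints dtypes once, then the predictions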
| 309
|
"""Make change greedily from a list of denominations."""


def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    """
    Return the coins chosen greedily, largest denomination first.

    >>> find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], "987")
    [500, 100, 100, 100, 100, 50, 20, 10, 5, 2]
    """
    total_value = int(value)
    # Initialize result
    answer = []
    # Traverse through all denominations, largest first
    while True:
        break
    for denomination in reversed(denominations):
        while total_value >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # append to the "answer" array
    return answer
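

# Caveat (explanatory note, not part of the original file): the greedy strategy
# is optimal for canonical coin systems such as the Indian denominations below,
# but not in general; for denominations [1, 3, 4] and value 6 it returns
# [4, 1, 1] even though [3, 3] uses fewer coins, so a dynamic-programming change
# maker is needed in that case.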
# Driver Code
if __name__ == "__main__":
UpperCamelCase_ = []
UpperCamelCase_ = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
UpperCamelCase_ = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
UpperCamelCase_ = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
UpperCamelCase_ = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
UpperCamelCase_ = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
UpperCamelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
| 309
| 1
|
"""MAUVE metric wrapper for the `datasets` library."""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
UpperCamelCase_ = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
UpperCamelCase_ = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
UpperCamelCase_ = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, homepage="https://github.com/krishnap25/mauve", inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                } ), codebase_urls=["https://github.com/krishnap25/mauve"], reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ], )

    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25):
        out = compute_mauve(
            p_text=predictions, q_text=references, p_features=p_features, q_features=q_features, p_tokens=p_tokens, q_tokens=q_tokens, num_buckets=num_buckets, pca_max_data=pca_max_data, kmeans_explained_var=kmeans_explained_var, kmeans_num_redo=kmeans_num_redo, kmeans_max_iter=kmeans_max_iter, featurize_model_name=featurize_model_name, device_id=device_id, max_text_length=max_text_length, divergence_curve_discretization_size=divergence_curve_discretization_size, mauve_scaling_factor=mauve_scaling_factor, verbose=verbose, seed=seed, )
        return out
| 309
|
"""Lazy import structure for the encoder-decoder model family."""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
"""Image feature for the `datasets` library."""
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("""|b1"""),
np.dtype("""|u1"""),
np.dtype("""<u2"""),
np.dtype(""">u2"""),
np.dtype("""<i2"""),
np.dtype(""">i2"""),
np.dtype("""<u4"""),
np.dtype(""">u4"""),
np.dtype("""<i4"""),
np.dtype(""">i4"""),
np.dtype("""<f4"""),
np.dtype(""">f4"""),
np.dtype("""<f8"""),
np.dtype(""">f8"""),
]
@dataclass
class Image:
    """Image feature; stored in Arrow as a struct of {"bytes": binary, "path": string}."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type
    def encode_example(self, value) -> dict:
        """Encode `value` (str path, bytes, dict, np.ndarray or PIL image) into a storage dict."""
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")
        if isinstance(value, list):
            value = np.array(value)
        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        """Decode a storage dict back into a PIL image."""
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", dict]:
        """Return the feature itself if decodable, otherwise flatten into bytes/path columns."""
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )
    def cast_storage(self, storage) -> pa.StructArray:
        """Cast Arrow storage of various layouts to the Image storage type."""
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()], type=pa.binary(), )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null() )
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        """Embed image files referenced by path into the Arrow storage as bytes."""
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ], type=pa.binary(), )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()], type=pa.string(), )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def _UpperCAmelCase ( _lowerCamelCase : "PIL.Image.Image" ) -> bytes:
_lowerCAmelCase : Any = BytesIO()
if image.format in list_image_compression_formats():
_lowerCAmelCase : Union[str, Any] = image.format
else:
_lowerCAmelCase : Tuple = """PNG""" if image.mode in ["""1""", """L""", """LA""", """RGB""", """RGBA"""] else """TIFF"""
image.save(_lowerCamelCase , format=_lowerCamelCase )
return buffer.getvalue()
def _UpperCAmelCase ( _lowerCamelCase : "PIL.Image.Image" ) -> dict:
if hasattr(_lowerCamelCase , """filename""" ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(_lowerCamelCase )}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays." )
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}" )
    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")
    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
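
# Usage sketch (illustrative, not part of this module): through the public
# `datasets` API, an array column is encoded by Image.encode_example on write
# and decoded back to a PIL image on access.
#
#     from datasets import Dataset, Features, Image
#     ds = Dataset.from_dict({"img": [np.zeros((4, 4, 3), dtype=np.uint8)]},
#                            features=Features({"img": Image()}))
#     pil_img = ds[0]["img"]  # decoded via Image.decode_example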
| 309
|
"""Tests for ClapProcessor saving/loading and tokenizer/feature-extractor parity."""
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class ClapProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "laion/clap-htsat-unfused"
        self.tmpdirname = tempfile.mkdtemp()

    def get_tokenizer(self, **kwargs):
        return RobertaTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def get_feature_extractor(self, **kwargs):
        return ClapFeatureExtractor.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        feature_extractor = self.get_feature_extractor()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        processor.save_pretrained(self.tmpdirname)
        processor = ClapProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)
    def test_save_load_pretrained_additional_features(self):
        processor = ClapProcessor(tokenizer=self.get_tokenizer(), feature_extractor=self.get_feature_extractor())
        processor.save_pretrained(self.tmpdirname)
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        feature_extractor_add_kwargs = self.get_feature_extractor(do_normalize=False, padding_value=1.0)
        processor = ClapProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, RobertaTokenizerFast)
        self.assertEqual(processor.feature_extractor.to_json_string(), feature_extractor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.feature_extractor, ClapFeatureExtractor)

    def test_feature_extractor(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        raw_speech = floats_list((3, 1000))
        input_feat_extract = feature_extractor(raw_speech, return_tensors="np")
        input_processor = processor(audios=raw_speech, return_tensors="np")
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)
    def test_tokenizer(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        input_str = "This is a test string"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_tokenizer_decode(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)
        self.assertListEqual(decoded_tok, decoded_processor)

    def test_model_input_names(self):
        feature_extractor = self.get_feature_extractor()
        tokenizer = self.get_tokenizer()
        processor = ClapProcessor(tokenizer=tokenizer, feature_extractor=feature_extractor)
        self.assertListEqual(
            processor.model_input_names[2:], feature_extractor.model_input_names, msg="`processor` and `feature_extractor` model input names do not match", )
| 309
| 1
|
"""Export BART + beam search to an ONNX graph and validate it against PyTorch."""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
UpperCamelCase_ = logging.getLogger(__name__)
UpperCamelCase_ = {"""facebook/bart-base""": BartForConditionalGeneration}
UpperCamelCase_ = {"""facebook/bart-base""": BartTokenizer}
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." )
    parser.add_argument(
        "--max_length", type=int, default=5, help="The maximum total input sequence length after tokenization.", )
    parser.add_argument(
        "--num_beams", type=int, default=None, help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ), )
    parser.add_argument(
        "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, )
    parser.add_argument(
        "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", )
    parser.add_argument(
        "--device", type=str, default="cpu", help="Device where the model will be run", )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")
    args = parser.parse_args()
    return args
def _UpperCAmelCase ( _lowerCamelCase : Any , _lowerCamelCase : Optional[int]="cpu" ) -> Dict:
_lowerCAmelCase : Union[str, Any] = model_dict[model_name].from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = tokenizer_dict[model_name].from_pretrained(_lowerCamelCase )
if model_name in ["facebook/bart-base"]:
_lowerCAmelCase : str = 0
_lowerCAmelCase : Any = None
_lowerCAmelCase : List[str] = 0
return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length) -> None:
    model.eval()
    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))
    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)
        summary_ids = model.generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], num_beams=num_beams, max_length=max_length, early_stopping=True, decoder_start_token_id=model.config.decoder_start_token_id, )
        torch.onnx.export(
            bart_script_model, (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ), onnx_file_path, opset_version=14, input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"], output_names=["output_ids"], dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            }, example_outputs=summary_ids, )
        logger.info("Model exported to {}".format(onnx_file_path))
        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))
        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))
        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None, {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            }, )
        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)
        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main() -> None:
    args = parse_args()
    max_length = 5
    num_beams = 4
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()
    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)
    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"
    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
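
# Example invocation (illustrative; the script filename is an assumption):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --output_file_path bart.onnx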
| 309
|
"""Tokenization classes for M2M100."""
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

SPIECE_UNDERLINE = "▁"
VOCAB_FILES_NAMES = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class M2M100Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, spm_file, src_lang=None, tgt_lang=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", pad_token="<pad>", unk_token="<unk>", language_codes="m2m100", sp_model_kwargs=None, num_madeup_words=8, **kwargs):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        self.language_codes = language_codes
        fairseq_language_code = FAIRSEQ_LANGUAGE_CODES[language_codes]
        self.lang_code_to_token = {lang_code: f"__{lang_code}__" for lang_code in fairseq_language_code}
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            self.get_lang_token(lang_code)
            for lang_code in fairseq_language_code
            if self.get_lang_token(lang_code) not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, unk_token=unk_token, pad_token=pad_token, language_codes=language_codes, sp_model_kwargs=self.sp_model_kwargs, num_madeup_words=num_madeup_words, **kwargs, )
        self.vocab_file = vocab_file
        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)
        self.encoder_size = len(self.encoder)
        self.lang_token_to_id = {
            self.get_lang_token(lang_code): self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)
        }
        self.lang_code_to_id = {lang_code: self.encoder_size + i for i, lang_code in enumerate(fairseq_language_code)}
        self.id_to_lang_token = {v: k for k, v in self.lang_token_to_id.items()}
        self._src_lang = src_lang if src_lang is not None else "en"
        self.tgt_lang = tgt_lang
        self.cur_lang_id = self.get_lang_id(self._src_lang)
        self.set_src_lang_special_tokens(self._src_lang)
        self.num_madeup_words = num_madeup_words
    @property
    def vocab_size(self) -> int:
        return len(self.encoder) + len(self.lang_token_to_id)

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        if token in self.lang_token_to_id:
            return self.lang_token_to_id[token]
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        if index in self.id_to_lang_token:
            return self.id_to_lang_token[index]
        return self.decoder.get(index, self.unk_token)
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : Optional[Any] = []
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
_lowerCAmelCase : List[Any] = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
    state = self.__dict__.copy()
    state["""sp_model"""] = None
    return state
def __setstate__( self , d ):
    self.__dict__ = d
    # for backward compatibility
    if not hasattr(self , """sp_model_kwargs""" ):
        self.sp_model_kwargs = {}
    self.sp_model = load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Dict = Path(snake_case_ )
if not save_dir.is_dir():
raise OSError(f'{save_dir} should be a directory' )
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , snake_case_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case_ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (str(snake_case_ ), str(snake_case_ ))
def __UpperCamelCase ( self , snake_case_ , snake_case_ = "en" , snake_case_ = None , snake_case_ = "ro" , **snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = src_lang
_lowerCAmelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase : Dict = src_lang
_lowerCAmelCase : str = self(snake_case_ , add_special_tokens=snake_case_ , **snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_lang_id(snake_case_ )
_lowerCAmelCase : Tuple = tgt_lang_id
return inputs
def __UpperCamelCase ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = self.get_lang_token(snake_case_ )
_lowerCAmelCase : List[Any] = self.lang_token_to_id[lang_token]
_lowerCAmelCase : Any = [self.cur_lang_id]
_lowerCAmelCase : Any = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = self.get_lang_token(snake_case_ )
_lowerCAmelCase : int = self.lang_token_to_id[lang_token]
_lowerCAmelCase : str = [self.cur_lang_id]
_lowerCAmelCase : str = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
return self.lang_code_to_token[lang]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = self.get_lang_token(snake_case_ )
return self.lang_token_to_id[lang_token]
def load_spm ( path : str , sp_model_kwargs : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs )
    spm.Load(str(path ) )
    return spm
def load_json ( path : str ) -> Union[Dict, List]:
    with open(path , """r""" ) as f:
        return json.load(f )
def save_json ( data , path : str ) -> None:
    with open(path , """w""" ) as f:
        json.dump(data , f , indent=2 )
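# Hedged usage sketch (assumes the tokenizer above corresponds to transformers'
# M2M100Tokenizer and that the "facebook/m2m100_418M" checkpoint is available):
#
#   from transformers import M2M100Tokenizer
#   tok = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M", src_lang="en", tgt_lang="ro")
#   batch = tok("Life is like a box of chocolates.", return_tensors="pt")
#   forced_id = tok.get_lang_id("ro")  # pass as forced_bos_token_id to model.generate()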
'''simple docstring'''
from typing import List, Optional, Union
import torch
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
UpperCamelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
UpperCamelCase_ = """
Examples:
```py
>>> from diffusers import KandinskyV22Pipeline, KandinskyV22PriorPipeline
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> zero_image_emb = out.negative_image_embeds
>>> pipe = KandinskyV22Pipeline.from_pretrained(\"kandinsky-community/kandinsky-2-2-decoder\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=50,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def downscale_height_and_width ( height , width , scale_factor=8 ):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
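# Worked example: for a 768x768 request with the default scale_factor of 8,
# 768 // 8**2 = 12 with no remainder, so the returned size is 12 * 8 = 96 per
# side; a 770x770 request would round up to 13 * 8 = 104 instead.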
class a_ (_a ):
def __init__( self , snake_case_ , snake_case_ , snake_case_ , ):
super().__init__()
self.register_modules(
unet=snake_case_ , scheduler=snake_case_ , movq=snake_case_ , )
_lowerCAmelCase : Optional[Any] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
if latents is None:
_lowerCAmelCase : Tuple = randn_tensor(snake_case_ , generator=snake_case_ , device=snake_case_ , dtype=snake_case_ )
else:
if latents.shape != shape:
raise ValueError(f'Unexpected latents shape, got {latents.shape}, expected {shape}' )
_lowerCAmelCase : Union[str, Any] = latents.to(snake_case_ )
_lowerCAmelCase : int = latents * scheduler.init_noise_sigma
return latents
def __UpperCamelCase ( self , snake_case_=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("""Please install accelerate via `pip install accelerate`""" )
_lowerCAmelCase : str = torch.device(f'cuda:{gpu_id}' )
_lowerCAmelCase : List[str] = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(snake_case_ , snake_case_ )
def __UpperCamelCase ( self , snake_case_=0 ):
if is_accelerate_available() and is_accelerate_version(""">=""" , """0.17.0.dev0""" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("""`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.""" )
_lowerCAmelCase : Any = torch.device(f'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to("""cpu""" , silence_dtype_warnings=snake_case_ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCAmelCase : Tuple = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCAmelCase , _lowerCAmelCase : str = cpu_offload_with_hook(snake_case_ , snake_case_ , prev_module_hook=snake_case_ )
# We'll offload the last model manually.
_lowerCAmelCase : Dict = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __UpperCamelCase ( self ):
if not hasattr(self.unet , """_hf_hook""" ):
return self.device
for module in self.unet.modules():
if (
hasattr(snake_case_ , """_hf_hook""" )
and hasattr(module._hf_hook , """execution_device""" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(snake_case_ )
def __call__( self , snake_case_ , snake_case_ , snake_case_ = 5_1_2 , snake_case_ = 5_1_2 , snake_case_ = 1_0_0 , snake_case_ = 4.0 , snake_case_ = 1 , snake_case_ = None , snake_case_ = None , snake_case_ = "pil" , snake_case_ = True , ):
_lowerCAmelCase : Optional[int] = self._execution_device
_lowerCAmelCase : Optional[Any] = guidance_scale > 1.0
if isinstance(snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = torch.cat(snake_case_ , dim=0 )
_lowerCAmelCase : Optional[Any] = image_embeds.shape[0] * num_images_per_prompt
if isinstance(snake_case_ , snake_case_ ):
_lowerCAmelCase : Any = torch.cat(snake_case_ , dim=0 )
if do_classifier_free_guidance:
_lowerCAmelCase : Optional[Any] = image_embeds.repeat_interleave(snake_case_ , dim=0 )
_lowerCAmelCase : int = negative_image_embeds.repeat_interleave(snake_case_ , dim=0 )
_lowerCAmelCase : int = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=snake_case_ )
self.scheduler.set_timesteps(snake_case_ , device=snake_case_ )
_lowerCAmelCase : str = self.scheduler.timesteps
_lowerCAmelCase : Dict = self.unet.config.in_channels
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = downscale_height_and_width(snake_case_ , snake_case_ , self.movq_scale_factor )
# create initial latent
_lowerCAmelCase : List[str] = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , image_embeds.dtype , snake_case_ , snake_case_ , snake_case_ , self.scheduler , )
for i, t in enumerate(self.progress_bar(snake_case_ ) ):
# expand the latents if we are doing classifier free guidance
_lowerCAmelCase : Optional[int] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCAmelCase : Tuple = {"""image_embeds""": image_embeds}
_lowerCAmelCase : Dict = self.unet(
sample=snake_case_ , timestep=snake_case_ , encoder_hidden_states=snake_case_ , added_cond_kwargs=snake_case_ , return_dict=snake_case_ , )[0]
if do_classifier_free_guidance:
_lowerCAmelCase , _lowerCAmelCase : Tuple = noise_pred.split(latents.shape[1] , dim=1 )
_lowerCAmelCase , _lowerCAmelCase : Tuple = noise_pred.chunk(2 )
_lowerCAmelCase , _lowerCAmelCase : Any = variance_pred.chunk(2 )
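# classifier-free guidance: push the prediction away from the unconditional
# branch and toward the image-conditioned branch by a factor of guidance_scale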
_lowerCAmelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCAmelCase : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , """variance_type""" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCAmelCase , _lowerCAmelCase : int = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCAmelCase : Union[str, Any] = self.scheduler.step(
snake_case_ , snake_case_ , snake_case_ , generator=snake_case_ , )[0]
# post-processing
_lowerCAmelCase : int = self.movq.decode(snake_case_ , force_not_quantize=snake_case_ )["""sample"""]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
_lowerCAmelCase : Any = image * 0.5 + 0.5
_lowerCAmelCase : List[Any] = image.clamp(0 , 1 )
_lowerCAmelCase : Tuple = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_lowerCAmelCase : List[str] = self.numpy_to_pil(snake_case_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=snake_case_ )
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def explicit_euler ( ode_func : Callable , ya : float , xa : float , step_size : float , x_end : float ) -> np.ndarray:
    # forward Euler: y[k + 1] = y[k] + step_size * ode_func(x[k], y[k])
    n = int(np.ceil((x_end - xa) / step_size ) )
    y = np.zeros((n + 1,) )
    y[0] = ya
    x = xa
    for k in range(n ):
        y[k + 1] = y[k] + step_size * ode_func(x , y[k] )
        x += step_size
    return y
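# Minimal usage sketch: solve y' = y with y(0) = 1 on [0, 3] (exact answer e**x).
# Explicit Euler is first order, so halving the step size roughly halves the error.
#
#   ys = explicit_euler(lambda x, y: y, 1.0, 0.0, 0.01, 3.0)
#   print(ys[-1])  # ~19.8, versus the exact e**3 ~= 20.09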
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class a_ (_a ):
__lowerCAmelCase : torch.FloatTensor
class a_ (_a , _a ):
@register_to_config
def __init__( self , snake_case_ = 6_5_5_3_6 , snake_case_ = None , snake_case_ = 2 , snake_case_ = 2 , snake_case_ = 0 , snake_case_ = "fourier" , snake_case_ = True , snake_case_ = False , snake_case_ = 0.0 , snake_case_ = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , snake_case_ = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , snake_case_ = "UNetMidBlock1D" , snake_case_ = None , snake_case_ = (3_2, 3_2, 6_4) , snake_case_ = None , snake_case_ = 8 , snake_case_ = 1 , snake_case_ = False , ):
super().__init__()
_lowerCAmelCase : Tuple = sample_size
# time
if time_embedding_type == "fourier":
_lowerCAmelCase : Dict = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=snake_case_ , log=snake_case_ , flip_sin_to_cos=snake_case_ )
_lowerCAmelCase : str = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
_lowerCAmelCase : int = Timesteps(
block_out_channels[0] , flip_sin_to_cos=snake_case_ , downscale_freq_shift=snake_case_ )
_lowerCAmelCase : int = block_out_channels[0]
if use_timestep_embedding:
_lowerCAmelCase : Optional[int] = block_out_channels[0] * 4
_lowerCAmelCase : Optional[Any] = TimestepEmbedding(
in_channels=snake_case_ , time_embed_dim=snake_case_ , act_fn=snake_case_ , out_dim=block_out_channels[0] , )
_lowerCAmelCase : Optional[int] = nn.ModuleList([] )
_lowerCAmelCase : Dict = None
_lowerCAmelCase : int = nn.ModuleList([] )
_lowerCAmelCase : int = None
# down
_lowerCAmelCase : Any = in_channels
for i, down_block_type in enumerate(snake_case_ ):
_lowerCAmelCase : List[Any] = output_channel
_lowerCAmelCase : Any = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
_lowerCAmelCase : str = i == len(snake_case_ ) - 1
_lowerCAmelCase : Tuple = get_down_block(
snake_case_ , num_layers=snake_case_ , in_channels=snake_case_ , out_channels=snake_case_ , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(snake_case_ )
# mid
_lowerCAmelCase : Any = get_mid_block(
snake_case_ , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=snake_case_ , add_downsample=snake_case_ , )
# up
_lowerCAmelCase : Any = list(reversed(snake_case_ ) )
_lowerCAmelCase : Dict = reversed_block_out_channels[0]
if out_block_type is None:
_lowerCAmelCase : Optional[Any] = out_channels
else:
_lowerCAmelCase : str = block_out_channels[0]
for i, up_block_type in enumerate(snake_case_ ):
_lowerCAmelCase : Dict = output_channel
_lowerCAmelCase : List[Any] = (
reversed_block_out_channels[i + 1] if i < len(snake_case_ ) - 1 else final_upsample_channels
)
_lowerCAmelCase : Dict = i == len(snake_case_ ) - 1
_lowerCAmelCase : str = get_up_block(
snake_case_ , num_layers=snake_case_ , in_channels=snake_case_ , out_channels=snake_case_ , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(snake_case_ )
_lowerCAmelCase : List[Any] = output_channel
# out
_lowerCAmelCase : Dict = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 3_2 )
_lowerCAmelCase : List[str] = get_out_block(
out_block_type=snake_case_ , num_groups_out=snake_case_ , embed_dim=block_out_channels[0] , out_channels=snake_case_ , act_fn=snake_case_ , fc_dim=block_out_channels[-1] // 4 , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ = True , ):
_lowerCAmelCase : List[str] = timestep
if not torch.is_tensor(snake_case_ ):
_lowerCAmelCase : Optional[int] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(snake_case_ ) and len(timesteps.shape ) == 0:
_lowerCAmelCase : Union[str, Any] = timesteps[None].to(sample.device )
_lowerCAmelCase : List[str] = self.time_proj(snake_case_ )
if self.config.use_timestep_embedding:
_lowerCAmelCase : str = self.time_mlp(snake_case_ )
else:
_lowerCAmelCase : str = timestep_embed[..., None]
_lowerCAmelCase : Any = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
_lowerCAmelCase : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
_lowerCAmelCase : List[str] = ()
for downsample_block in self.down_blocks:
_lowerCAmelCase , _lowerCAmelCase : List[str] = downsample_block(hidden_states=snake_case_ , temb=snake_case_ )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
_lowerCAmelCase : List[str] = self.mid_block(snake_case_ , snake_case_ )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
_lowerCAmelCase : Any = down_block_res_samples[-1:]
_lowerCAmelCase : Tuple = down_block_res_samples[:-1]
_lowerCAmelCase : List[str] = upsample_block(snake_case_ , res_hidden_states_tuple=snake_case_ , temb=snake_case_ )
# 5. post-process
if self.out_block:
_lowerCAmelCase : int = self.out_block(snake_case_ , snake_case_ )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=snake_case_ )
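# Hedged usage sketch (assumes the model above corresponds to diffusers'
# UNet1DModel; the channel counts below are illustrative):
#
#   net = UNet1DModel(sample_size=65536, in_channels=2, out_channels=2)
#   sample = torch.randn(1, 2, 65536)
#   out = net(sample, timestep=10).sample  # denoised output, same shape as input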
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def _UpperCAmelCase ( file : Dict , sock : Optional[int] ) -> Union[str, Any]:
    # ===== initialization =====
    # the @patch decorators apply bottom-up, so `file` mocks builtins.open
    # and `sock` mocks socket.socket
    conn = Mock()
    sock.return_value.accept.return_value = conn, Mock()
    read_chunks = iter([1, None] )
    file.return_value.__enter__.return_value.read.side_effect = lambda _ : next(read_chunks )
    # ===== invoke =====
    send_file(filename="""mytext.txt""" , testing=True )
# ===== ensurance =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
'''simple docstring'''
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
debug_launcher(test_script.main )
def __UpperCamelCase ( self ):
debug_launcher(test_ops.main )
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_0 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1_0 , snake_case_=0.02 , snake_case_=None , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : Dict = num_patches + 1
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[Any] = ViTMSNModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Tuple = self.type_sequence_label_size
_lowerCAmelCase : int = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[int] = model(snake_case_ , labels=snake_case_ )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : int = 1
_lowerCAmelCase : List[str] = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ (_a , _a , unittest.TestCase ):
__lowerCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__lowerCAmelCase : Optional[int] = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Any = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = ViTMSNModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(snake_case_ )
_lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def __UpperCamelCase ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = ViTMSNModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def prepare_img ( ):
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
    return image
@require_torch
@require_vision
class a_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
torch.manual_seed(2 )
_lowerCAmelCase : Dict = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(snake_case_ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Dict = model(**snake_case_ )
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_lowerCAmelCase : Tuple = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
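# Hedged inference sketch mirroring the integration test above (assumes the
# "facebook/vit-msn-small" checkpoint is available):
#
#   processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
#   model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
#   logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits
#   print(logits.argmax(-1))  # predicted ImageNet class id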
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""hustvl/yolos-small""": """https://huggingface.co/hustvl/yolos-small/resolve/main/config.json""",
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class a_ (_a ):
__lowerCAmelCase : List[Any] = """yolos"""
def __init__( self , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu" , snake_case_=0.0 , snake_case_=0.0 , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=[5_1_2, 8_6_4] , snake_case_=1_6 , snake_case_=3 , snake_case_=True , snake_case_=1_0_0 , snake_case_=True , snake_case_=False , snake_case_=1 , snake_case_=5 , snake_case_=2 , snake_case_=5 , snake_case_=2 , snake_case_=0.1 , **snake_case_ , ):
super().__init__(**snake_case_ )
_lowerCAmelCase : Dict = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Optional[Any] = num_attention_heads
_lowerCAmelCase : Optional[Any] = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : int = attention_probs_dropout_prob
_lowerCAmelCase : Optional[Any] = initializer_range
_lowerCAmelCase : Union[str, Any] = layer_norm_eps
_lowerCAmelCase : Any = image_size
_lowerCAmelCase : Dict = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : Dict = qkv_bias
_lowerCAmelCase : Any = num_detection_tokens
_lowerCAmelCase : List[Any] = use_mid_position_embeddings
_lowerCAmelCase : str = auxiliary_loss
# Hungarian matcher
_lowerCAmelCase : Tuple = class_cost
_lowerCAmelCase : Any = bbox_cost
_lowerCAmelCase : List[Any] = giou_cost
# Loss coefficients
_lowerCAmelCase : Optional[Any] = bbox_loss_coefficient
_lowerCAmelCase : List[Any] = giou_loss_coefficient
_lowerCAmelCase : Any = eos_coefficient
class a_ (_a ):
__lowerCAmelCase : Tuple = version.parse("""1.11""" )
@property
def __UpperCamelCase ( self ):
return OrderedDict(
[
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
] )
@property
def __UpperCamelCase ( self ):
return 1E-4
@property
def __UpperCamelCase ( self ):
return 1_2
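# Hedged usage sketch (assumes the config above is transformers' YolosConfig):
#
#   from transformers import YolosConfig, YolosForObjectDetection
#   config = YolosConfig(image_size=[512, 864], num_detection_tokens=100)
#   model = YolosForObjectDetection(config)  # randomly initialized, 100 detection slots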
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class a_ (_a ):
__lowerCAmelCase : List[Any] = """microsoft/speecht5_tts"""
__lowerCAmelCase : List[Any] = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
__lowerCAmelCase : List[str] = """text_reader"""
__lowerCAmelCase : Optional[Any] = SpeechTaProcessor
__lowerCAmelCase : str = SpeechTaForTextToSpeech
__lowerCAmelCase : int = SpeechTaHifiGan
__lowerCAmelCase : int = ["""text"""]
__lowerCAmelCase : int = ["""audio"""]
def __UpperCamelCase ( self ):
if self.post_processor is None:
_lowerCAmelCase : int = """microsoft/speecht5_hifigan"""
super().setup()
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : Tuple = self.pre_processor(text=snake_case_ , return_tensors="""pt""" , truncation=snake_case_ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
_lowerCAmelCase : List[str] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
_lowerCAmelCase : Any = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.model.generate_speech(**snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.post_processor(snake_case_ ).cpu().detach()
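# Hedged usage sketch (assumes this tool is exposed through the transformers
# agents API; the tool identifier below is illustrative):
#
#   from transformers import load_tool
#   reader = load_tool("text-to-speech")
#   audio = reader("Hello, this text will be read out loud.")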
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def _UpperCAmelCase ( _lowerCamelCase : Any , _lowerCamelCase : Tuple , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Any , _lowerCamelCase : Dict ) -> Union[str, Any]:
for attribute in key.split(""".""" ):
_lowerCAmelCase : int = getattr(_lowerCamelCase , _lowerCamelCase )
if weight_type is not None:
_lowerCAmelCase : Dict = getattr(_lowerCamelCase , _lowerCamelCase ).shape
else:
_lowerCAmelCase : Any = hf_pointer.shape
assert hf_shape == value.shape, (
f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
f' {value.shape} for {full_name}'
)
if weight_type == "weight":
_lowerCAmelCase : Optional[Any] = value
elif weight_type == "weight_g":
_lowerCAmelCase : Dict = value
elif weight_type == "weight_v":
_lowerCAmelCase : Optional[Any] = value
elif weight_type == "bias":
_lowerCAmelCase : Optional[int] = value
else:
_lowerCAmelCase : Dict = value
logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : str ) -> Union[str, Any]:
_lowerCAmelCase : Any = []
_lowerCAmelCase : Union[str, Any] = fairseq_model.state_dict()
_lowerCAmelCase : Optional[int] = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
_lowerCAmelCase : Tuple = False
if "conv_layers" in name:
load_conv_layer(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , hf_model.config.feat_extract_norm == """group""" , )
_lowerCAmelCase : Tuple = True
else:
for key, mapped_key in MAPPING.items():
_lowerCAmelCase : List[str] = """hubert.""" + mapped_key if (is_finetuned and mapped_key != """lm_head""") else mapped_key
if key in name or (key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0] and not is_finetuned):
_lowerCAmelCase : Any = True
if "*" in mapped_key:
_lowerCAmelCase : Optional[int] = name.split(_lowerCamelCase )[0].split(""".""" )[-2]
_lowerCAmelCase : str = mapped_key.replace("""*""" , _lowerCamelCase )
if "weight_g" in name:
_lowerCAmelCase : Optional[Any] = """weight_g"""
elif "weight_v" in name:
_lowerCAmelCase : List[str] = """weight_v"""
elif "weight" in name:
_lowerCAmelCase : List[str] = """weight"""
elif "bias" in name:
_lowerCAmelCase : Tuple = """bias"""
else:
_lowerCAmelCase : Dict = None
set_recursively(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
continue
if not is_used:
unused_weights.append(_lowerCamelCase )
logger.warning(f'Unused weights: {unused_weights}' )
def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Dict , _lowerCamelCase : int , _lowerCamelCase : int , _lowerCamelCase : List[Any] ) -> Union[str, Any]:
_lowerCAmelCase : Optional[Any] = full_name.split("""conv_layers.""" )[-1]
_lowerCAmelCase : int = name.split(""".""" )
_lowerCAmelCase : Any = int(items[0] )
_lowerCAmelCase : List[str] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
_lowerCAmelCase : Any = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
_lowerCAmelCase : Union[str, Any] = value
logger.info(f'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
_lowerCAmelCase : Union[str, Any] = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'{full_name} has size {value.shape}, but'
f' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
_lowerCAmelCase : int = value
logger.info(f'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(_lowerCamelCase )
@torch.no_grad()
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : Dict=None , _lowerCamelCase : Dict=True ) -> List[str]:
if config_path is not None:
_lowerCAmelCase : int = HubertConfig.from_pretrained(_lowerCamelCase )
else:
_lowerCAmelCase : Tuple = HubertConfig()
if is_finetuned:
if dict_path:
_lowerCAmelCase : str = Dictionary.load(_lowerCamelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
_lowerCAmelCase : Union[str, Any] = target_dict.pad_index
_lowerCAmelCase : Optional[int] = target_dict.bos_index
_lowerCAmelCase : Optional[Any] = target_dict.eos_index
_lowerCAmelCase : Optional[Any] = len(target_dict.symbols )
_lowerCAmelCase : Tuple = os.path.join(_lowerCamelCase , """vocab.json""" )
if not os.path.isdir(_lowerCamelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(_lowerCamelCase ) )
return
os.makedirs(_lowerCamelCase , exist_ok=_lowerCamelCase )
with open(_lowerCamelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(target_dict.indices , _lowerCamelCase )
_lowerCAmelCase : Tuple = WavaVecaCTCTokenizer(
_lowerCamelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=_lowerCamelCase , )
_lowerCAmelCase : Tuple = True if config.feat_extract_norm == """layer""" else False
_lowerCAmelCase : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowerCamelCase , return_attention_mask=_lowerCamelCase , )
_lowerCAmelCase : str = WavaVecaProcessor(feature_extractor=_lowerCamelCase , tokenizer=_lowerCamelCase )
processor.save_pretrained(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = HubertForCTC(_lowerCamelCase )
else:
_lowerCAmelCase : int = HubertModel(_lowerCamelCase )
if is_finetuned:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
_lowerCAmelCase : Dict = model[0].eval()
recursively_load_weights(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
hf_wavavec.save_pretrained(_lowerCamelCase )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCamelCase_ = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
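# Example invocation (the script name and paths are illustrative; the flags
# match the argparse definitions above):
#
#   python convert_hubert_checkpoint.py \
#       --checkpoint_path ./hubert_base_ls960.pt \
#       --pytorch_dump_folder_path ./hubert-base \
#       --not_finetuned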
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image ( ):
    url = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
    image = Image.open(requests.get(url , stream=True ).raw ).convert("""RGB""" )
    return image
def _UpperCAmelCase ( _lowerCamelCase : Any ) -> Dict:
_lowerCAmelCase : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key ( dct , old , new ):
    val = dct.pop(old )
    dct[new] = val
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ) -> Tuple:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_lowerCAmelCase : Tuple = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
_lowerCAmelCase : Optional[Any] = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
_lowerCAmelCase : int = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
_lowerCAmelCase : str = qkv_bias
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] ) -> List[Any]:
_lowerCAmelCase : str = 3_64 if """coco""" in model_name else 2_24
_lowerCAmelCase : str = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_lowerCAmelCase : int = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
_lowerCAmelCase : Union[str, Any] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
_lowerCAmelCase : Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_lowerCAmelCase : str = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
_lowerCAmelCase : Dict = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]=None , _lowerCamelCase : int=False ) -> List[str]:
_lowerCAmelCase : int = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_lowerCAmelCase : List[Any] = tokenizer("""\n""" , add_special_tokens=_lowerCamelCase ).input_ids[0]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
_lowerCAmelCase : Union[str, Any] = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_lowerCAmelCase , _lowerCAmelCase : List[str] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_lowerCAmelCase : Dict = """cuda""" if torch.cuda.is_available() else """cpu"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
_lowerCAmelCase : List[Any] = original_model.state_dict()
_lowerCAmelCase : Optional[int] = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_lowerCAmelCase : Tuple = state_dict.pop(_lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
_lowerCAmelCase : List[Any] = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_lowerCAmelCase : Optional[int] = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_lowerCAmelCase : Dict = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_lowerCAmelCase : Tuple = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_lowerCAmelCase : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_lowerCAmelCase : int = key.replace("""t5""" , """language""" )
_lowerCAmelCase : Tuple = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_lowerCAmelCase : Union[str, Any] = load_demo_image()
_lowerCAmelCase : Optional[int] = vis_processors["""eval"""](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
_lowerCAmelCase : List[str] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
# create processor
_lowerCAmelCase : Optional[int] = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
_lowerCAmelCase : Tuple = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
_lowerCAmelCase : Any = processor(images=_lowerCamelCase , return_tensors="""pt""" ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
_lowerCAmelCase : Optional[Any] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_lowerCAmelCase : Optional[Any] = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
_lowerCAmelCase : List[Any] = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_lowerCAmelCase : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_lowerCAmelCase : Dict = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_lowerCAmelCase : Any = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_lowerCAmelCase : List[Any] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
_lowerCAmelCase : Union[str, Any] = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = original_model.generate({"""image""": original_pixel_values} )
_lowerCAmelCase : Dict = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , _lowerCamelCase )
_lowerCAmelCase : int = input_ids.shape[1]
_lowerCAmelCase : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
UpperCamelCase_ = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
UpperCamelCase_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
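# Example invocation (script name is illustrative; --model_name must be one of
# the choices listed above):
#
#   python convert_blip2_checkpoint.py --model_name blip2-opt-2.7b --push_to_hub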
'''simple docstring'''
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class a_ (_a ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
_lowerCAmelCase : List[Any] = 8
# DPR tok
_lowerCAmelCase : List[str] = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_lowerCAmelCase : Dict = os.path.join(self.tmpdirname , """dpr_tokenizer""" )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
_lowerCAmelCase : str = os.path.join(snake_case_ , DPR_VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        # BART tok
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_dpr_ctx_encoder_tokenizer(self) -> DPRContextEncoderTokenizer:
        return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size), 2 * np.ones(self.retrieval_vector_size)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)
        return dataset
    def get_dummy_canonical_hf_index_retriever(self):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
        )
        with patch("transformers.models.rag.retrieval_rag.load_dataset") as mock_load_dataset:
            mock_load_dataset.return_value = dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        return retriever
    def get_dummy_custom_hf_index_retriever(self, from_disk: bool):
        dataset = self.get_dummy_dataset()
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="custom",
        )
        if from_disk:
            config.passages_path = os.path.join(self.tmpdirname, "dataset")
            config.index_path = os.path.join(self.tmpdirname, "index.faiss")
            dataset.get_index("embeddings").save(os.path.join(self.tmpdirname, "index.faiss"))
            dataset.drop_index("embeddings")
            dataset.save_to_disk(os.path.join(self.tmpdirname, "dataset"))
            del dataset
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
            )
        else:
            retriever = RagRetriever(
                config,
                question_encoder_tokenizer=self.get_dpr_tokenizer(),
                generator_tokenizer=self.get_bart_tokenizer(),
                index=CustomHFIndex(config.retrieval_vector_size, dataset),
            )
        return retriever
    def get_dummy_legacy_index_retriever(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1), 2 * np.ones(self.retrieval_vector_size + 1)],
            }
        )
        dataset.add_faiss_index("embeddings", string_factory="Flat", metric_type=faiss.METRIC_INNER_PRODUCT)

        index_file_name = os.path.join(self.tmpdirname, "hf_bert_base.hnswSQ8_correct_phi_128.c_index")
        dataset.save_faiss_index("embeddings", index_file_name + ".index.dpr")
        pickle.dump(dataset["id"], open(index_file_name + ".index_meta.dpr", "wb"))

        passages_file_name = os.path.join(self.tmpdirname, "psgs_w100.tsv.pkl")
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages, open(passages_file_name, "wb"))

        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size,
            question_encoder=DPRConfig().to_dict(),
            generator=BartConfig().to_dict(),
            index_name="legacy",
            index_path=self.tmpdirname,
        )
        retriever = RagRetriever(
            config, question_encoder_tokenizer=self.get_dpr_tokenizer(), generator_tokenizer=self.get_bart_tokenizer()
        )
        return retriever
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = 1
_lowerCAmelCase : Any = self.get_dummy_canonical_hf_index_retriever()
_lowerCAmelCase : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = retriever.retrieve(snake_case_ , n_docs=snake_case_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , snake_case_ )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = self.get_dummy_canonical_hf_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
with patch("""transformers.models.rag.retrieval_rag.load_dataset""" ) as mock_load_dataset:
_lowerCAmelCase : Dict = self.get_dummy_dataset()
retriever.save_pretrained(snake_case_ )
_lowerCAmelCase : Optional[int] = RagRetriever.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_lowerCAmelCase : List[Any] = retriever.retrieve(snake_case_ , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = 1
_lowerCAmelCase : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case_ )
_lowerCAmelCase : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[str] = retriever.retrieve(snake_case_ , n_docs=snake_case_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , snake_case_ )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case_ )
_lowerCAmelCase : Any = RagRetriever.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
_lowerCAmelCase : int = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_lowerCAmelCase : List[Any] = retriever.retrieve(snake_case_ , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = 1
_lowerCAmelCase : List[Any] = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case_ )
_lowerCAmelCase : Optional[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[Any] = retriever.retrieve(snake_case_ , n_docs=snake_case_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""embeddings""", """id""", """text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""id"""] ) , snake_case_ )
self.assertEqual(doc_dicts[0]["""id"""][0] , """1""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""id"""][0] , """0""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case_ )
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case_ )
_lowerCAmelCase : str = RagRetriever.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_lowerCAmelCase : List[str] = retriever.retrieve(snake_case_ , n_docs=1 )
self.assertTrue(out is not None )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = 1
_lowerCAmelCase : Tuple = self.get_dummy_legacy_index_retriever()
_lowerCAmelCase : Union[str, Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Any = retriever.retrieve(snake_case_ , n_docs=snake_case_ )
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertEqual(len(snake_case_ ) , 2 )
self.assertEqual(sorted(doc_dicts[0] ) , ["""text""", """title"""] )
self.assertEqual(len(doc_dicts[0]["""text"""] ) , snake_case_ )
self.assertEqual(doc_dicts[0]["""text"""][0] , """bar""" ) # max inner product is reached with second doc
self.assertEqual(doc_dicts[1]["""text"""][0] , """foo""" ) # max inner product is reached with first doc
self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.get_dummy_legacy_index_retriever()
with tempfile.TemporaryDirectory() as tmp_dirname:
retriever.save_pretrained(snake_case_ )
_lowerCAmelCase : str = RagRetriever.from_pretrained(snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
_lowerCAmelCase : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_lowerCAmelCase : Optional[Any] = retriever.retrieve(snake_case_ , n_docs=1 )
self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self ):
import torch
_lowerCAmelCase : Tuple = 1
_lowerCAmelCase : Any = self.get_dummy_canonical_hf_index_retriever()
_lowerCAmelCase : int = [[5, 7], [1_0, 1_1]]
_lowerCAmelCase : Tuple = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_lowerCAmelCase : List[str] = retriever(snake_case_ , snake_case_ , prefix=retriever.config.generator.prefix , n_docs=snake_case_ )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = (
out["""context_input_ids"""],
out["""context_attention_mask"""],
out["""retrieved_doc_embeds"""],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertIsInstance(snake_case_ , snake_case_ )
self.assertIsInstance(snake_case_ , np.ndarray )
_lowerCAmelCase : Tuple = retriever(
snake_case_ , snake_case_ , prefix=retriever.config.generator.prefix , n_docs=snake_case_ , return_tensors="""pt""" , )
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = ( # noqa: F841
out["""context_input_ids"""],
out["""context_attention_mask"""],
out["""retrieved_doc_embeds"""],
out["""doc_ids"""],
)
self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
self.assertIsInstance(snake_case_ , torch.Tensor )
self.assertIsInstance(snake_case_ , torch.Tensor )
self.assertIsInstance(snake_case_ , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.get_dpr_ctx_encoder_tokenizer()
_lowerCAmelCase : Any = 1
_lowerCAmelCase : Dict = self.get_dummy_custom_hf_index_retriever(from_disk=snake_case_ )
retriever.set_ctx_encoder_tokenizer(snake_case_ )
_lowerCAmelCase : Optional[int] = [[5, 7], [1_0, 1_1]]
_lowerCAmelCase : List[Any] = np.array(
[np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
_lowerCAmelCase : Tuple = retriever(snake_case_ , snake_case_ , prefix=retriever.config.generator.prefix , n_docs=snake_case_ )
self.assertEqual(
len(snake_case_ ) , 6 ) # check that the retriever output consists of 6 attributes, including the tokenized docs
self.assertEqual(
all(k in out for k in ("""tokenized_doc_ids""", """tokenized_doc_attention_mask""") ) , snake_case_ ) # check that the doc-token-related keys are present in the output dictionary
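Outside of this mocked setup, the retriever is exercised the same way against a real index. A minimal sketch, assuming network access to the published facebook/rag-token-nq checkpoint (the dummy dataset keeps the download small); the random vector merely stands in for a real DPR question embedding.

import numpy as np
from transformers import RagRetriever, RagTokenizer

retriever = RagRetriever.from_pretrained(
    "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
)
tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")

question_ids = tokenizer.question_encoder(
    ["who holds the record in 100m freestyle"], return_tensors="np"
)["input_ids"]
question_hidden_states = np.random.randn(1, 768).astype(np.float32)  # stand-in for DPR output

out = retriever(question_ids, question_hidden_states, n_docs=2, return_tensors="pt")
print(out["context_input_ids"].shape)  # (n_queries * n_docs, seq_len)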
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/diffusers"""
# Pattern that looks at the indentation in a line.
UpperCamelCase_ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase_ = re.compile(r"""\[([^\]]+)\]""")
def get_indent(line):
    # Returns the indent in `line`.
    search = _re_indent.search(line)
    return "" if search is None else search.groups()[0]


def split_code_in_indented_blocks(code, indent_level="", start_prompt=None, end_prompt=None):
    # Splits `code` into blocks of the given `indent_level`, optionally bounded
    # by `start_prompt` and `end_prompt`.
    index = 0
    lines = code.split("\n")
    if start_prompt is not None:
        while not lines[index].startswith(start_prompt):
            index += 1
        blocks = ["\n".join(lines[:index])]
    else:
        blocks = []

    # We split into blocks until we get to the `end_prompt` (or the end of the file).
    current_block = [lines[index]]
    index += 1
    while index < len(lines) and (end_prompt is None or not lines[index].startswith(end_prompt)):
        if len(lines[index]) > 0 and get_indent(lines[index]) == indent_level:
            if len(current_block) > 0 and get_indent(current_block[-1]).startswith(indent_level + " "):
                current_block.append(lines[index])
                blocks.append("\n".join(current_block))
                if index < len(lines) - 1:
                    current_block = [lines[index + 1]]
                    index += 1
                else:
                    current_block = []
            else:
                blocks.append("\n".join(current_block))
                current_block = [lines[index]]
        else:
            current_block.append(lines[index])
        index += 1

    # Adds current block if it's nonempty.
    if len(current_block) > 0:
        blocks.append("\n".join(current_block))

    # Add final block after end_prompt if provided.
    if end_prompt is not None and index < len(lines):
        blocks.append("\n".join(lines[index:]))

    return blocks
def ignore_underscore(key):
    # Wraps a key function so that comparisons ignore casing and underscores.
    def _inner(x):
        return key(x).lower().replace("_", "")

    return _inner


def sort_objects(objects, key=None):
    # If no key is provided, we use a noop.
    def noop(x):
        return x

    if key is None:
        key = noop
    # Constants are all uppercase, they go first.
    constants = [obj for obj in objects if key(obj).isupper()]
    # Classes are not all uppercase but start with a capital, they go second.
    classes = [obj for obj in objects if key(obj)[0].isupper() and not key(obj).isupper()]
    # Functions begin with a lowercase, they go last.
    functions = [obj for obj in objects if not key(obj)[0].isupper()]

    key1 = ignore_underscore(key)
    return sorted(constants, key=key1) + sorted(classes, key=key1) + sorted(functions, key=key1)
def sort_objects_in_import(import_statement):
    # This inner function sorts imports between [ ].
    def _replace(match):
        imports = match.groups()[0]
        if "," not in imports:
            return f"[{imports}]"
        keys = [part.strip().replace('"', "") for part in imports.split(",")]
        # We will have a final empty element if the line finished with a comma.
        if len(keys[-1]) == 0:
            keys = keys[:-1]
        return "[" + ", ".join([f'"{k}"' for k in sort_objects(keys)]) + "]"

    lines = import_statement.split("\n")
    if len(lines) > 3:
        # Here we have to sort internal imports that are on several lines (one per name):
        # key: [
        #     "object1",
        #     "object2",
        #     ...
        # ]
        # We may have to ignore one or two lines on each side.
        idx = 2 if lines[1].strip() == "[" else 1
        keys_to_sort = [(i, _re_strip_line.search(line).groups()[0]) for i, line in enumerate(lines[idx:-idx])]
        sorted_indices = sort_objects(keys_to_sort, key=lambda x: x[1])
        sorted_lines = [lines[x[0] + idx] for x in sorted_indices]
        return "\n".join(lines[:idx] + sorted_lines + lines[-idx:])
    elif len(lines) == 3:
        # Here we have to sort internal imports that are on one separate line:
        # key: [
        #     "object1", "object2", ...
        # ]
        if _re_bracket_content.search(lines[1]) is not None:
            lines[1] = _re_bracket_content.sub(_replace, lines[1])
        else:
            keys = [part.strip().replace('"', "") for part in lines[1].split(",")]
            # We will have a final empty element if the line finished with a comma.
            if len(keys[-1]) == 0:
                keys = keys[:-1]
            lines[1] = get_indent(lines[1]) + ", ".join([f'"{k}"' for k in sort_objects(keys)])
        return "\n".join(lines)
    else:
        # Finally we have to deal with imports fitting on one line.
        import_statement = _re_bracket_content.sub(_replace, import_statement)
        return import_statement
def sort_imports(file, check_only=True):
    # Sorts the imports defined in the `_import_structure` of a given init file.
    with open(file, "r") as f:
        code = f.read()

    if "_import_structure" not in code:
        return

    # Blocks of indent level 0
    main_blocks = split_code_in_indented_blocks(
        code, start_prompt="_import_structure = {", end_prompt="if TYPE_CHECKING:"
    )

    # We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
    for block_idx in range(1, len(main_blocks) - 1):
        # Check if the block contains some `_import_structure`s thingy to sort.
        block = main_blocks[block_idx]
        block_lines = block.split("\n")

        # Get to the start of the imports.
        line_idx = 0
        while line_idx < len(block_lines) and "_import_structure" not in block_lines[line_idx]:
            # Skip dummy import blocks
            if "import dummy" in block_lines[line_idx]:
                line_idx = len(block_lines)
            else:
                line_idx += 1
        if line_idx >= len(block_lines):
            continue

        # Ignore beginning and last line: they don't contain anything.
        internal_block_code = "\n".join(block_lines[line_idx:-1])
        indent = get_indent(block_lines[1])
        # Split the internal block into blocks of indent level 1.
        internal_blocks = split_code_in_indented_blocks(internal_block_code, indent_level=indent)
        # We have two categories of import key: list or _import_structure[key].append/extend
        pattern = _re_direct_key if "_import_structure" in block_lines[0] else _re_indirect_key
        # Grab the keys, but there is a trap: some lines are empty or just comments.
        keys = [(pattern.search(b).groups()[0] if pattern.search(b) is not None else None) for b in internal_blocks]
        # We only sort the lines with a key.
        keys_to_sort = [(i, key) for i, key in enumerate(keys) if key is not None]
        sorted_indices = [x[0] for x in sorted(keys_to_sort, key=lambda x: x[1])]

        # We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
        count = 0
        reordered_blocks = []
        for i in range(len(internal_blocks)):
            if keys[i] is None:
                reordered_blocks.append(internal_blocks[i])
            else:
                block = sort_objects_in_import(internal_blocks[sorted_indices[count]])
                reordered_blocks.append(block)
                count += 1

        # And we put our main block back together with its first and last line.
        main_blocks[block_idx] = "\n".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]])

    if code != "\n".join(main_blocks):
        if check_only:
            return True
        else:
            print(f"Overwriting {file}.")
            with open(file, "w") as f:
                f.write("\n".join(main_blocks))


def sort_imports_in_all_inits(check_only=True):
    # Runs `sort_imports` on every `__init__.py` under PATH_TO_DIFFUSERS.
    failures = []
    for root, _, files in os.walk(PATH_TO_DIFFUSERS):
        if "__init__.py" in files:
            result = sort_imports(os.path.join(root, "__init__.py"), check_only=check_only)
            if result:
                failures = [os.path.join(root, "__init__.py")]
    if len(failures) > 0:
        raise ValueError(f"Would overwrite {len(failures)} files, run `make style`.")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
    args = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
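To make the ordering rule concrete, a small illustrative check of sort_objects (the names are made up for the demo): constants sort first, then classes, then functions, with each group compared case-insensitively and ignoring underscores.

names = ["zeta_fn", "Alpha", "BETA_CONST", "_helper", "Gamma"]
print(sort_objects(names))
# -> ['BETA_CONST', 'Alpha', 'Gamma', '_helper', 'zeta_fn']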
'''simple docstring'''
def solution(n: int = 100) -> int:
    # (n * (n + 1) // 2) ** 2 is both the square of the sum of the first n
    # naturals and the sum of their cubes; subtracting the sum of the squares
    # gives the answer to Project Euler problem 6.
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'{solution() = }')
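A quick sanity check against the small case from Project Euler problem 6: for n = 10 the square of the sum is 3025 and the sum of the squares is 385, so the difference is 2640.

assert solution(10) == 2640  # 55**2 - 385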
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
logger = logging.get_logger(__name__)
class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)
        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator
    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: Optional[str] = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs
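As the deprecation warning suggests, new code should call the tokenizer directly. A minimal sketch with the published checkpoint (illustrative; needs network access and a transformers version whose tokenizers accept text_target):

from transformers import RagTokenizer

tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
batch = tokenizer(
    ["who holds the record in 100m freestyle"],
    text_target=["michael phelps"],
    padding="longest",
    return_tensors="pt",
)
print(batch["input_ids"].shape, batch["labels"].shape)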
'''simple docstring'''
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class FlaxBertModelTester(unittest.TestCase):
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=7 , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=True , snake_case_=9_9 , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=1_6 , snake_case_=2 , snake_case_=0.02 , snake_case_=4 , ):
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : Dict = batch_size
_lowerCAmelCase : Dict = seq_length
_lowerCAmelCase : List[str] = is_training
_lowerCAmelCase : Optional[Any] = use_attention_mask
_lowerCAmelCase : Optional[int] = use_token_type_ids
_lowerCAmelCase : Optional[int] = use_labels
_lowerCAmelCase : Any = vocab_size
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : Dict = num_hidden_layers
_lowerCAmelCase : Any = num_attention_heads
_lowerCAmelCase : Optional[int] = intermediate_size
_lowerCAmelCase : str = hidden_act
_lowerCAmelCase : List[Any] = hidden_dropout_prob
_lowerCAmelCase : Union[str, Any] = attention_probs_dropout_prob
_lowerCAmelCase : Tuple = max_position_embeddings
_lowerCAmelCase : Tuple = type_vocab_size
_lowerCAmelCase : str = type_sequence_label_size
_lowerCAmelCase : List[str] = initializer_range
_lowerCAmelCase : int = num_choices
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : str = None
if self.use_attention_mask:
_lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : int = None
if self.use_token_type_ids:
_lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : List[Any] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case_ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Dict = config_and_inputs
_lowerCAmelCase : Optional[Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Tuple = config_and_inputs
_lowerCAmelCase : List[str] = True
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class FlaxBertModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxBertModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        # Only check this for base model, not necessary for all model classes.
        # This will also help speed-up tests.
        model = FlaxBertModel.from_pretrained("bert-base-cased")
        outputs = model(np.ones((1, 1)))
        self.assertIsNotNone(outputs)
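The same smoke test can be run standalone; a minimal sketch assuming flax is installed and the bert-base-cased weights can be downloaded:

import numpy as np
from transformers import FlaxBertModel

model = FlaxBertModel.from_pretrained("bert-base-cased")
outputs = model(np.ones((1, 1), dtype="i4"))
print(outputs.last_hidden_state.shape)  # (1, 1, 768)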
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def pigeonhole_sort(a):
    min_val = min(a)  # min() finds the minimum value
    max_val = max(a)  # max() finds the maximum value

    size = max_val - min_val + 1  # size is difference of max and min values plus one

    # list of pigeonholes of size equal to the variable size
    holes = [0] * size

    # Populate the pigeonholes.
    for x in a:
        assert isinstance(x, int), "integers only please"
        holes[x - min_val] += 1

    # Putting the elements back into the array in an order.
    i = 0
    for count in range(size):
        while holes[count] > 0:
            holes[count] -= 1
            a[i] = count + min_val
            i += 1


def main():
    a = [8, 3, 2, 7, 4, 6, 8]
    pigeonhole_sort(a)
    print("Sorted order is:", " ".join(str(n) for n in a))
if __name__ == "__main__":
main()
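Pigeonhole sort runs in O(n + N) time and O(N) extra space, where N = max - min + 1, so it only pays off when the value range is comparable to the number of elements. Because indices are offset by min_val, negative integers work too:

data = [-3, 0, 2, -1, 2]
pigeonhole_sort(data)  # sorts in place
print(data)  # [-3, -1, 0, 2, 2]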
'''simple docstring'''
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
UpperCamelCase_ = """scheduler_config.json"""
class FlaxKarrasDiffusionSchedulers(Enum):
    FlaxDDIMScheduler = 1
    FlaxDDPMScheduler = 2
    FlaxPNDMScheduler = 3
    FlaxLMSDiscreteScheduler = 4
    FlaxDPMSolverMultistepScheduler = 5
@dataclass
class FlaxSchedulerOutput(BaseOutput):
    prev_sample: jnp.ndarray


class FlaxSchedulerMixin:
    config_name = SCHEDULER_CONFIG_NAME
    ignore_for_config = ["dtype"]
    _compatibles = []
    has_compatibles = True
    @classmethod
    def from_pretrained(
        cls,
        pretrained_model_name_or_path=None,
        subfolder=None,
        return_unused_kwargs=False,
        **kwargs,
    ):
        config, kwargs = cls.load_config(
            pretrained_model_name_or_path=pretrained_model_name_or_path,
            subfolder=subfolder,
            return_unused_kwargs=True,
            **kwargs,
        )
        scheduler, unused_kwargs = cls.from_config(config, return_unused_kwargs=True, **kwargs)

        if hasattr(scheduler, "create_state") and getattr(scheduler, "has_state", False):
            state = scheduler.create_state()

        if return_unused_kwargs:
            return scheduler, state, unused_kwargs

        return scheduler, state

    def save_pretrained(self, save_directory, push_to_hub=False, **kwargs):
        self.save_config(save_directory=save_directory, push_to_hub=push_to_hub, **kwargs)

    @property
    def compatibles(self):
        return self._get_compatibles()

    @classmethod
    def _get_compatibles(cls):
        compatible_classes_str = list(set([cls.__name__] + cls._compatibles))
        diffusers_library = importlib.import_module(__name__.split(".")[0])
        compatible_classes = [
            getattr(diffusers_library, c) for c in compatible_classes_str if hasattr(diffusers_library, c)
        ]
        return compatible_classes
def broadcast_to_shape_from_left(x: jnp.ndarray, shape: Tuple[int]) -> jnp.ndarray:
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32) -> jnp.ndarray:
    # Discretizes the given alpha_bar function, which defines the cumulative
    # product of (1 - beta) over time from t = 0 to t = 1.
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
@flax.struct.dataclass
class CommonSchedulerState:
    alphas: jnp.ndarray
    betas: jnp.ndarray
    alphas_cumprod: jnp.ndarray

    @classmethod
    def create(cls, scheduler):
        config = scheduler.config

        if config.trained_betas is not None:
            betas = jnp.asarray(config.trained_betas, dtype=scheduler.dtype)
        elif config.beta_schedule == "linear":
            betas = jnp.linspace(config.beta_start, config.beta_end, config.num_train_timesteps, dtype=scheduler.dtype)
        elif config.beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            betas = (
                jnp.linspace(
                    config.beta_start**0.5, config.beta_end**0.5, config.num_train_timesteps, dtype=scheduler.dtype
                )
                ** 2
            )
        elif config.beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            betas = betas_for_alpha_bar(config.num_train_timesteps, dtype=scheduler.dtype)
        else:
            raise NotImplementedError(
                f"beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}"
            )

        alphas = 1.0 - betas
        alphas_cumprod = jnp.cumprod(alphas, axis=0)

        return cls(alphas=alphas, betas=betas, alphas_cumprod=alphas_cumprod)
def get_sqrt_alpha_prod(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod


def add_noise_common(
    state: CommonSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray
):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples


def get_velocity_common(state: CommonSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
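An illustrative check of the cosine ("squaredcos_cap_v2") schedule built by betas_for_alpha_bar, assuming jax is installed: betas start near zero and grow toward the 0.999 cap as t approaches 1.

betas = betas_for_alpha_bar(1000)
print(betas.shape)                         # (1000,)
print(float(betas[0]), float(betas[-1]))   # ~0 at the start, capped at 0.999 at the end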
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
def __init__( self , snake_case_ , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<s>" , snake_case_="<unk>" , snake_case_="<pad>" , snake_case_="<mask>" , snake_case_=["<s>NOTUSED", "</s>NOTUSED"] , snake_case_ = None , **snake_case_ , ):
# Mask token behave like a normal word, i.e. include the space before it
_lowerCAmelCase : Dict = AddedToken(snake_case_ , lstrip=snake_case_ , rstrip=snake_case_ ) if isinstance(snake_case_ , snake_case_ ) else mask_token
_lowerCAmelCase : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=snake_case_ , eos_token=snake_case_ , unk_token=snake_case_ , sep_token=snake_case_ , cls_token=snake_case_ , pad_token=snake_case_ , mask_token=snake_case_ , additional_special_tokens=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , **snake_case_ , )
_lowerCAmelCase : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(snake_case_ ) )
_lowerCAmelCase : Dict = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
_lowerCAmelCase : List[Any] = {"""<s>NOTUSED""": 0, """<pad>""": 1, """</s>NOTUSED""": 2, """<unk>""": 3}
_lowerCAmelCase : Tuple = len(self.fairseq_tokens_to_ids )
_lowerCAmelCase : List[str] = len(self.sp_model ) + len(self.fairseq_tokens_to_ids )
_lowerCAmelCase : Union[str, Any] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_lowerCAmelCase : str = [self.cls_token_id]
_lowerCAmelCase : int = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
if token_ids_a is None:
return [1] + ([0] * len(snake_case_ )) + [1]
return [1] + ([0] * len(snake_case_ )) + [1, 1] + ([0] * len(snake_case_ )) + [1]
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : List[str] = [self.sep_token_id]
_lowerCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __UpperCamelCase ( self ):
return len(self.fairseq_tokens_to_ids ) + len(self.sp_model )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(snake_case_ ) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Tuple = []
_lowerCAmelCase : Tuple = """"""
_lowerCAmelCase : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : int = True
_lowerCAmelCase : str = []
else:
current_sub_tokens.append(snake_case_ )
_lowerCAmelCase : List[str] = False
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __getstate__( self ):
_lowerCAmelCase : Any = self.__dict__.copy()
_lowerCAmelCase : Optional[Any] = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : int = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : int = {}
_lowerCAmelCase : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if not os.path.isdir(snake_case_ ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_lowerCAmelCase : List[Any] = os.path.join(
snake_case_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , snake_case_ )
elif not os.path.isfile(self.vocab_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : Optional[Any] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (out_vocab_file,)
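A round-trip sketch with the published checkpoint (illustrative; requires the sentencepiece package and network access):

from transformers import CamembertTokenizer

tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
ids = tokenizer("J'aime le camembert !")["input_ids"]
print(tokenizer.convert_ids_to_tokens(ids))
print(tokenizer.decode(ids))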
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["""modeling_focalnet"""] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
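With the lazy structure in place, a downstream import only materializes what it names; nothing from modeling_focalnet is loaded until a model class is requested. An illustrative check:

# Importing the config does not pull in the torch-dependent modeling code.
from transformers import FocalNetConfig

config = FocalNetConfig()
print(config.model_type)  # "focalnet"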
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--onnx_model_path""",
default=None,
type=str,
required=True,
help="""Path to ONNX model: """,
)
parser.add_argument(
"""--output_dir""",
default=None,
type=str,
required=True,
help="""The output directory where the model checkpoints and predictions will be written.""",
)
# Other parameters
parser.add_argument(
"""--tokenizer_name""",
default="""""",
type=str,
required=True,
help="""Pretrained tokenizer name or path if not the same as model_name""",
)
parser.add_argument(
"""--version_2_with_negative""",
action="""store_true""",
help="""If true, the SQuAD examples contain some that do not have an answer.""",
)
parser.add_argument(
"""--null_score_diff_threshold""",
type=float,
default=0.0,
help="""If null_score - best_non_null is greater than the threshold predict null.""",
)
parser.add_argument(
"""--max_seq_length""",
default=3_84,
type=int,
help=(
"""The maximum total input sequence length after WordPiece tokenization. Sequences """
"""longer than this will be truncated, and sequences shorter than this will be padded."""
),
)
parser.add_argument(
"""--doc_stride""",
default=1_28,
type=int,
help="""When splitting up a long document into chunks, how much stride to take between chunks.""",
)
parser.add_argument("""--per_device_eval_batch_size""", default=8, type=int, help="""Batch size per GPU/CPU for evaluation.""")
parser.add_argument(
"""--n_best_size""",
default=20,
type=int,
help="""The total number of n-best predictions to generate in the nbest_predictions.json output file.""",
)
parser.add_argument(
"""--max_answer_length""",
default=30,
type=int,
help=(
"""The maximum length of an answer that can be generated. This is needed because the start """
"""and end predictions are not conditioned on one another."""
),
)
parser.add_argument("""--seed""", type=int, default=42, help="""random seed for initialization""")
parser.add_argument(
"""--dataset_name""",
type=str,
default=None,
required=True,
help="""The name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--dataset_config_name""",
type=str,
default=None,
help="""The configuration name of the dataset to use (via the datasets library).""",
)
parser.add_argument(
"""--preprocessing_num_workers""", type=int, default=4, help="""A csv or a json file containing the training data."""
)
parser.add_argument("""--overwrite_cache""", action="""store_true""", help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--fp16""",
action="""store_true""",
help="""Whether to use 16-bit (mixed) precision instead of 32-bit""",
)
parser.add_argument(
"""--int8""",
action="""store_true""",
help="""Whether to use INT8""",
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name."""
)
logger.info("""Training/evaluation parameters %s""", args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
# import ONNX file
if not os.path.exists("""temp_engine"""):
os.makedirs("""temp_engine""")
EXPLICIT_BATCH = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, """rb""") as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, """wb""") as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs to the device
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""",
datefmt="""%m/%d/%Y %H:%M:%S""",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError("""Evaluation requires a dataset name""")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
UpperCamelCase_ = raw_datasets["""validation"""].column_names
UpperCamelCase_ = """question""" if """question""" in column_names else column_names[0]
UpperCamelCase_ = """context""" if """context""" in column_names else column_names[1]
UpperCamelCase_ = """answers""" if """answers""" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
UpperCamelCase_ = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
F'The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the'
F'model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.'
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="""only_second""" if pad_on_right else """only_first""",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="""max_length""",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("""overflow_to_sample_mapping""")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["""example_id"""] = []
    for i in range(len(tokenized_examples["""input_ids"""])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["""example_id"""].append(examples["""id"""][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["""offset_mapping"""][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["""offset_mapping"""][i])
        ]
    return tokenized_examples
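# Illustrative sketch (separate from the pipeline above): with `stride` and
# `return_overflowing_tokens`, one long example is split into several
# overlapping features, and `overflow_to_sample_mapping` points each feature
# back to its source example. The checkpoint name below is an assumption for
# demonstration only.
def _demo_overflowing_features():
    from transformers import AutoTokenizer
    demo_tok = AutoTokenizer.from_pretrained("""bert-base-uncased""")
    enc = demo_tok(
        ["""What is mentioned?"""],
        ["""word """ * 600],
        truncation="""only_second""",
        max_length=128,
        stride=32,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
    )
    # Several features, all mapping back to example 0, e.g. [0, 0, 0, ...].
    return enc["""overflow_to_sample_mapping"""]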
eval_examples = raw_datasets["""validation"""]
# Validation Feature Creation
eval_dataset = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc="""Running tokenizer on validation dataset""",
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(["""example_id""", """offset_mapping"""])
eval_dataloader = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="""eval"""):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
    references = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
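# For reference, the formatting above produces exactly the shapes the SQuAD
# metric expects; a self-contained toy example:
_demo_predictions = [{"""id""": """q0""", """prediction_text""": """Paris"""}]
_demo_references = [{"""id""": """q0""", """answers""": {"""text""": ["""Paris"""], """answer_start""": [0]}}]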
metric = load_metric("""squad_v2""" if args.version_2_with_negative else """squad""")
# Evaluation!
logger.info("""Loading ONNX model %s for evaluation""", args.onnx_model_path)
with open(engine_name, """rb""") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers (start logits and end logits) on host and device.
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
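    # `model_infer` (called in the loop below) is not shown in this excerpt; a
    # minimal sketch of the usual pycuda/TensorRT pattern it follows, assuming
    # three int32 input bindings (input_ids, attention_mask, token_type_ids)
    # and two output bindings, would look like this:
    def _model_infer_sketch(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
        # Copy inputs host -> device asynchronously on the stream.
        for d_input, name in zip(d_inputs, ["""input_ids""", """attention_mask""", """token_type_ids"""]):
            cuda.memcpy_htod_async(d_input, np.ascontiguousarray(batch[name].numpy(), dtype=np.int32), stream)
        infer_start = timeit.default_timer()
        # Run inference with all input/output device pointers bound.
        context.execute_async_v2(bindings=[int(d) for d in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle)
        # Copy outputs device -> host and wait for the stream to finish.
        cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
        cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
        stream.synchronize()
        return (h_output0, h_output1), timeit.default_timer() - infer_start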
# Evaluation
logger.info("""***** Running Evaluation *****""")
logger.info(F' Num examples = {len(eval_dataset)}')
logger.info(F' Batch size = {args.per_device_eval_batch_size}')
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
logger.info(""" Evaluation done in total %f secs (%f sec per example)""", evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info("""Average Inference Time = {:.3f} ms""".format(total_time * 10_00 / niter))
logger.info("""Total Inference Time = {:.3f} ms""".format(total_time * 10_00))
logger.info("""Total Number of Inference = %d""", niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(F'Evaluation metrics: {eval_metric}')
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
UpperCamelCase_ = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""DPTFeatureExtractor"""]
UpperCamelCase_ = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_dpt"""] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1_000,
    "megajoule": 1_000_000,
    "gigajoule": 1_000_000_000,
    "wattsecond": 1.0,
    "watthour": 3_600,
    "kilowatthour": 3_600_000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4_186.8,
    "kilocalorie_nutr": 4_186_800.00,
    "electronvolt": 1.602_176_634e-19,
    "britishthermalunit_it": 1_055.055_85,
    "footpound": 1.355_818,
}
def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f'Incorrect \'from_type\' or \'to_type\' value: {from_type!r}, {to_type!r}\n'
            f'Valid values are: {", ".join(ENERGY_CONVERSION)}'
        )
        raise ValueError(msg)
return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
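# Quick sanity check (illustrative): one kilowatt-hour is 3.6 million joules.
def _demo_energy_conversion() -> None:
    assert energy_conversion("kilowatthour", "joule", 1) == 3_600_000.0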
if __name__ == "__main__":
import doctest
doctest.testmod()
| 309
|
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
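    # ReLU is applied element-wise, so the same function handles any array shape:
    print(relu(np.array([[-2.0, 3.0], [0.5, -0.1]])))  # --> [[0. 3.] [0.5 0.]]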
| 309
| 1
|
'''simple docstring'''
from __future__ import annotations
import random
# Maximum size of the population. Bigger could be faster but is more memory expensive.
UpperCamelCase_ = 2_00
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
UpperCamelCase_ = 50
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
UpperCamelCase_ = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 10_00))
def evaluate(item: str, main_target: str) -> tuple[str, float]:
    # The score is the number of characters that already match the target.
    score = len([g for position, g in enumerate(item) if g == main_target[position]])
    return (item, float(score))
def crossover(parent_1: str, parent_2: str) -> tuple[str, str]:
    # Single-point crossover: slice both parents at the same random index and swap tails.
    random_slice = random.randint(0, len(parent_1) - 1)
    child_1 = parent_1[:random_slice] + parent_2[random_slice:]
    child_2 = parent_2[:random_slice] + parent_1[random_slice:]
    return (child_1, child_2)
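# Illustrative check of the single-point crossover above (not used by the
# algorithm itself; the slice index is random, so the exact children vary):
def _demo_crossover() -> None:
    child_1, child_2 = crossover("AAAA", "BBBB")
    assert len(child_1) == len(child_2) == 4  # e.g. ("ABBB", "BAAA")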
def mutate(child: str, genes: list[str]) -> str:
    # With probability MUTATION_PROBABILITY, replace one random gene of the child.
    child_list = list(child)
    if random.uniform(0, 1) < MUTATION_PROBABILITY:
        child_list[random.randint(0, len(child)) - 1] = random.choice(genes)
    return "".join(child_list)
def select(parent_1: tuple[str, float], population_score: list[tuple[str, float]], genes: list[str], ) -> list[str]:
    pop = []
    # Generate more children proportionally to the fitness score.
    child_n = int(parent_1[1] * 100) + 1
    child_n = 10 if child_n >= 10 else child_n
    for _ in range(child_n):
        parent_2 = population_score[random.randint(0, N_SELECTED)][0]
        child_1, child_2 = crossover(parent_1[0], parent_2)
        # Append new strings to the population list.
        pop.append(mutate(child_1, genes))
        pop.append(mutate(child_2, genes))
    return pop
def basic(target: str, genes: list[str], debug: bool = True) -> tuple[int, int, str]:
    # Verify if N_POPULATION is bigger than N_SELECTED
    if N_POPULATION < N_SELECTED:
        msg = f'{N_POPULATION} must be bigger than {N_SELECTED}'
        raise ValueError(msg)
# Verify that the target contains no genes besides the ones inside genes variable.
    not_in_genes_list = sorted({c for c in target if c not in genes})
    if not_in_genes_list:
        msg = f'{not_in_genes_list} is not in genes list, evolution cannot converge'
        raise ValueError(msg)
# Generate random starting population.
    population = []
    for _ in range(N_POPULATION):
        population.append("".join([random.choice(genes) for i in range(len(target))]))
# Just some logs to know what the algorithms is doing.
    generation, total_population = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population)
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        population_score = [evaluate(item, target) for item in population]
# Check if there is a matching evolution.
        population_score = sorted(population_score, key=lambda x: x[1], reverse=True)
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 10 == 0:
print(
f'\nGeneration: {generation}'
f'\nTotal Population:{total_population}'
f'\nBest score: {population_score[0][1]}'
f'\nBest string: {population_score[0][0]}' )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
        population_best = population[: int(N_POPULATION / 3)]
        population.clear()
        population.extend(population_best)
# Normalize population score to be between 0 and 1.
        population_score = [
            (item, score / len(target)) for item, score in population_score
        ]
# This is selection
        for i in range(N_SELECTED):
            population.extend(select(population_score[int(i)], population_score, genes))
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
        if len(population) > N_POPULATION:
break
if __name__ == "__main__":
    target_str = (
        """This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!"""
    )
    genes_list = list(
        """ ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm"""
        """nopqrstuvwxyz.,;!?+-*#@^'èéòà€ù=)(&%$£/\\"""
    )
    generation, population, target = basic(target_str, genes_list)
print(
F'\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}'
)
| 309
|
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
logger = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DonutImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 309
| 1
|
'''simple docstring'''
import enum
import shutil
import sys
TERMINAL_WIDTH, _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class Direction(enum.Enum):
    UP = 0
    DOWN = 1
def forceWrite(content, end=""):
    sys.stdout.write(str(content) + end)
    sys.stdout.flush()
def writeColor(content, color, end=""):
    forceWrite(f'\u001b[{color}m{content}\u001b[0m', end)
def reset_cursor():
    forceWrite("""\r""")
def move_cursor(num_lines: int, direction: str):
    forceWrite(f'\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}')
def clear_line():
    forceWrite(""" """ * TERMINAL_WIDTH)
    reset_cursor()
def linebreak():
    reset_cursor()
    forceWrite("""-""" * TERMINAL_WIDTH)
| 309
|
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class DPMSolverSDESchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 10
    def get_scheduler_config(self, **kwargs):
        config = {
            """num_train_timesteps""": 1_100,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.00001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : Any = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[Any] = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Union[str, Any] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Union[str, Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_8210_4492_1875 ) < 1E-2
assert abs(result_mean.item() - 0.2178_7059_6456_5277 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3521_1181_6406 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9068_9229_9652 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.scheduler_classes[0]
_lowerCAmelCase : Optional[Any] = self.get_scheduler_config(prediction_type="""v_prediction""" )
_lowerCAmelCase : Dict = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps )
_lowerCAmelCase : int = self.dummy_model()
_lowerCAmelCase : Dict = self.dummy_sample_deter * scheduler.init_noise_sigma
_lowerCAmelCase : int = sample.to(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : List[Any] = model(snake_case_ , snake_case_ )
_lowerCAmelCase : str = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : int = output.prev_sample
_lowerCAmelCase : str = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Optional[int] = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_1492_0043_9453 ) < 1E-2
assert abs(result_mean.item() - 0.1_6226_2890_1481_6284 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_6633_6059_5703 ) < 1E-2
assert abs(result_mean.item() - 0.1_6688_3260_0116_7297 ) < 1E-3
else:
assert abs(result_sum.item() - 119.8_4875_4882_8125 ) < 1E-2
assert abs(result_mean.item() - 0.1560_5306_6253_6621 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_lowerCAmelCase : str = self.get_scheduler_config()
_lowerCAmelCase : str = scheduler_class(**snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : Tuple = self.dummy_model()
_lowerCAmelCase : Optional[int] = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_lowerCAmelCase : str = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : Dict = output.prev_sample
_lowerCAmelCase : List[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_9573_9746_0938 ) < 1E-2
assert abs(result_mean.item() - 0.2_1805_9346_0798_2635 ) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_3536_3769_5312 ) < 1E-2
assert abs(result_mean.item() - 0.2_2342_9083_8241_5771 ) < 1E-3
else:
assert abs(result_sum.item() - 162.52_3834_2285_1562 ) < 1E-2
assert abs(result_mean.item() - 0.211_6195_7085_1326 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.scheduler_classes[0]
_lowerCAmelCase : Optional[int] = self.get_scheduler_config()
_lowerCAmelCase : Tuple = scheduler_class(**snake_case_ , use_karras_sigmas=snake_case_ )
scheduler.set_timesteps(self.num_inference_steps , device=snake_case_ )
_lowerCAmelCase : List[Any] = self.dummy_model()
_lowerCAmelCase : str = self.dummy_sample_deter.to(snake_case_ ) * scheduler.init_noise_sigma
_lowerCAmelCase : Optional[int] = sample.to(snake_case_ )
for t in scheduler.timesteps:
_lowerCAmelCase : List[str] = scheduler.scale_model_input(snake_case_ , snake_case_ )
_lowerCAmelCase : int = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Optional[int] = scheduler.step(snake_case_ , snake_case_ , snake_case_ )
_lowerCAmelCase : str = output.prev_sample
_lowerCAmelCase : Optional[Any] = torch.sum(torch.abs(snake_case_ ) )
_lowerCAmelCase : Dict = torch.mean(torch.abs(snake_case_ ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_9741_3574_2188 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_6535_6445_3125 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
else:
assert abs(result_sum.item() - 170.3_1352_2338_8672 ) < 1E-2
assert abs(result_mean.item() - 0.2_3003_8727_3098_1811 ) < 1E-2
| 309
| 1
|
'''simple docstring'''
from sklearn.metrics import recall_score
import datasets
UpperCamelCase_ = """
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
"""
UpperCamelCase_ = """
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the 'positive class' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `'binary'`.
- `'binary'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `'micro'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `'macro'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `'weighted'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `'macro'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `'samples'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
    - **zero_division** (`string`): Sets the value to return when there is a zero division. Defaults to `'warn'`.
- `'warn'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{'recall': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric('recall')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{'recall': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric('recall')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{'recall': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric('recall')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average='macro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='micro')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average='weighted')
>>> print(results)
{'recall': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{'recall': array([1., 0., 0.])}
"""
UpperCamelCase_ = """
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
            {
                """predictions""": datasets.Sequence(datasets.Value("""int32""")),
                """references""": datasets.Sequence(datasets.Value("""int32""")),
            }
            if self.config_name == """multilabel"""
            else {
                """predictions""": datasets.Value("""int32"""),
                """references""": datasets.Value("""int32"""),
            }), reference_urls=["""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"""], )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="""binary""", sample_weight=None, zero_division="""warn""", ):
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division, )
        return {"recall": float(score) if score.size == 1 else score}
| 309
|
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""YituTech/conv-bert-base""": """https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt""",
"""YituTech/conv-bert-medium-small""": (
"""https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"""
),
"""YituTech/conv-bert-small""": """https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """YituTech/conv-bert-base""": 512,
    """YituTech/conv-bert-medium-small""": 512,
    """YituTech/conv-bert-small""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""YituTech/conv-bert-base""": {"""do_lower_case""": True},
"""YituTech/conv-bert-medium-small""": {"""do_lower_case""": True},
"""YituTech/conv-bert-small""": {"""do_lower_case""": True},
}
class ConvBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, do_lower_case=True, unk_token="""[UNK]""", sep_token="""[SEP]""", pad_token="""[PAD]""", cls_token="""[CLS]""", mask_token="""[MASK]""", tokenize_chinese_chars=True, strip_accents=None, **kwargs, ):
        super().__init__(
            vocab_file, tokenizer_file=tokenizer_file, do_lower_case=do_lower_case, unk_token=unk_token, sep_token=sep_token, pad_token=pad_token, cls_token=cls_token, mask_token=mask_token, tokenize_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, **kwargs, )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("""lowercase""", do_lower_case) != do_lower_case
            or normalizer_state.get("""strip_accents""", strip_accents) != strip_accents
            or normalizer_state.get("""handle_chinese_chars""", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("""type"""))
            normalizer_state["""lowercase"""] = do_lower_case
            normalizer_state["""strip_accents"""] = strip_accents
            normalizer_state["""handle_chinese_chars"""] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
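# Usage sketch (illustrative; downloads the checkpoint named in the map above):
def _demo_convbert_tokenizer():
    tokenizer = ConvBertTokenizerFast.from_pretrained("""YituTech/conv-bert-base""")
    return tokenizer("""Hello world""")["""input_ids"""]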
| 309
| 1
|
'''simple docstring'''
import argparse
import torch
from transformers import BertConfig, BertForPreTraining, load_tf_weights_in_bert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, bert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = BertConfig.from_json_file(bert_config_file)
    print(f'Building PyTorch model from configuration: {config}')
    model = BertForPreTraining(config)
    # Load weights from tf checkpoint
    load_tf_weights_in_bert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}')
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--bert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
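    # Example invocation (script name and paths are illustrative placeholders):
    #   python convert_bert_original_tf_checkpoint_to_pytorch.py \
    #       --tf_checkpoint_path ./bert_model.ckpt \
    #       --bert_config_file ./bert_config.json \
    #       --pytorch_dump_path ./pytorch_model.bin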
| 309
|
'''simple docstring'''
import copy
import os
import cv2
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    def __init__(self):
        self.img = """"""
        self.original_image = """"""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0
    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="""x""")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("""output_data/output.jpg""", self.img)
    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])
    def show_image(self):
        cv2.imshow("""Output-Image""", self.img)
        cv2.imshow("""Input-Image""", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()
if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), """image_data/input.jpg""")
    stretcher = ConstantStretch()
stretcher.stretch(file_path)
stretcher.plot_histogram()
stretcher.show_image()
| 309
| 1
|
'''simple docstring'''
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class DPMSolverSinglestepSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DPMSolverSinglestepScheduler,)
    forward_default_kwargs = (("""num_inference_steps""", 25),)
    def get_scheduler_config(self, **kwargs):
        config = {
            """num_train_timesteps""": 1_000,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """solver_order""": 2,
            """prediction_type""": """epsilon""",
            """thresholding""": False,
            """sample_max_value""": 1.0,
            """algorithm_type""": """dpmsolver++""",
            """solver_type""": """midpoint""",
            """lambda_min_clipped""": -float("""inf"""),
            """variance_type""": None,
        }
        config.update(**kwargs)
        return config
def __UpperCamelCase ( self , snake_case_=0 , **snake_case_ ):
_lowerCAmelCase : Tuple = dict(self.forward_default_kwargs )
_lowerCAmelCase : Any = kwargs.pop("""num_inference_steps""" , snake_case_ )
_lowerCAmelCase : List[str] = self.dummy_sample
_lowerCAmelCase : List[Any] = 0.1 * sample
_lowerCAmelCase : int = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[int] = self.get_scheduler_config(**snake_case_ )
_lowerCAmelCase : Tuple = scheduler_class(**snake_case_ )
scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals
_lowerCAmelCase : List[Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case_ )
_lowerCAmelCase : Union[str, Any] = scheduler_class.from_pretrained(snake_case_ )
new_scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals
_lowerCAmelCase : Optional[Any] = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase , _lowerCAmelCase : List[str] = sample, sample
for t in range(snake_case_ , time_step + scheduler.config.solver_order + 1 ):
_lowerCAmelCase : Union[str, Any] = scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
_lowerCAmelCase : int = new_scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self , snake_case_=0 , **snake_case_ ):
_lowerCAmelCase : Optional[int] = dict(self.forward_default_kwargs )
_lowerCAmelCase : int = kwargs.pop("""num_inference_steps""" , snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.dummy_sample
_lowerCAmelCase : Optional[int] = 0.1 * sample
_lowerCAmelCase : str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
_lowerCAmelCase : Optional[Any] = self.get_scheduler_config()
_lowerCAmelCase : List[str] = scheduler_class(**snake_case_ )
scheduler.set_timesteps(snake_case_ )
# copy over dummy past residuals (must be after setting timesteps)
_lowerCAmelCase : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(snake_case_ )
_lowerCAmelCase : int = scheduler_class.from_pretrained(snake_case_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(snake_case_ )
# copy over dummy past residual (must be after setting timesteps)
_lowerCAmelCase : Tuple = dummy_past_residuals[: new_scheduler.config.solver_order]
_lowerCAmelCase : str = scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
_lowerCAmelCase : Optional[Any] = new_scheduler.step(snake_case_ , snake_case_ , snake_case_ , **snake_case_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def __UpperCamelCase ( self , snake_case_=None , **snake_case_ ):
if scheduler is None:
_lowerCAmelCase : List[Any] = self.scheduler_classes[0]
_lowerCAmelCase : Dict = self.get_scheduler_config(**snake_case_ )
_lowerCAmelCase : List[Any] = scheduler_class(**snake_case_ )
_lowerCAmelCase : Tuple = self.scheduler_classes[0]
_lowerCAmelCase : int = self.get_scheduler_config(**snake_case_ )
_lowerCAmelCase : List[str] = scheduler_class(**snake_case_ )
_lowerCAmelCase : str = 1_0
_lowerCAmelCase : int = self.dummy_model()
_lowerCAmelCase : List[str] = self.dummy_sample_deter
scheduler.set_timesteps(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : str = model(snake_case_ , snake_case_ )
_lowerCAmelCase : int = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
return sample
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : str = 5_0
_lowerCAmelCase : List[str] = self.dummy_model()
_lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter
scheduler.set_timesteps(snake_case_ )
# make sure that the first t is uneven
for i, t in enumerate(scheduler.timesteps[3:] ):
_lowerCAmelCase : int = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Any = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
_lowerCAmelCase : int = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.2574 ) < 1E-3
def __UpperCamelCase ( self ):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def __UpperCamelCase ( self ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
_lowerCAmelCase : Optional[int] = DPMSolverSinglestepScheduler(**self.get_scheduler_config() )
_lowerCAmelCase : List[Any] = self.full_loop(scheduler=snake_case_ )
_lowerCAmelCase : List[str] = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
_lowerCAmelCase : List[str] = DEISMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Any = UniPCMultistepScheduler.from_config(scheduler.config )
_lowerCAmelCase : List[Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
_lowerCAmelCase : Tuple = self.full_loop(scheduler=snake_case_ )
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def __UpperCamelCase ( self ):
self.check_over_configs(thresholding=snake_case_ )
for order in [1, 2, 3]:
for solver_type in ["midpoint", "heun"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=snake_case_ , prediction_type=snake_case_ , sample_max_value=snake_case_ , algorithm_type="""dpmsolver++""" , solver_order=snake_case_ , solver_type=snake_case_ , )
def __UpperCamelCase ( self ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def __UpperCamelCase ( self ):
for algorithm_type in ["dpmsolver", "dpmsolver++"]:
for solver_type in ["midpoint", "heun"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=snake_case_ , solver_type=snake_case_ , prediction_type=snake_case_ , algorithm_type=snake_case_ , )
_lowerCAmelCase : Any = self.full_loop(
solver_order=snake_case_ , solver_type=snake_case_ , prediction_type=snake_case_ , algorithm_type=snake_case_ , )
assert not torch.isnan(snake_case_ ).any(), "Samples have nan numbers"
def __UpperCamelCase ( self ):
        self.check_over_configs(lower_order_final=True)
        self.check_over_configs(lower_order_final=False)
def __UpperCamelCase ( self ):
self.check_over_configs(lambda_min_clipped=-float("""inf""" ) )
self.check_over_configs(lambda_min_clipped=-5.1 )
def __UpperCamelCase ( self ):
        self.check_over_configs(variance_type=None)
        self.check_over_configs(variance_type="""learned_range""")
def __UpperCamelCase ( self ):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=snake_case_ , time_step=0 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.full_loop()
_lowerCAmelCase : Union[str, Any] = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.2791 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.full_loop(use_karras_sigmas=snake_case_ )
_lowerCAmelCase : Any = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.2248 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Any = self.full_loop(prediction_type="""v_prediction""" )
_lowerCAmelCase : List[Any] = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.1453 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = self.full_loop(prediction_type="""v_prediction""" , use_karras_sigmas=snake_case_ )
_lowerCAmelCase : str = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_mean.item() - 0.0649 ) < 1E-3
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.scheduler_classes[0]
_lowerCAmelCase : List[Any] = self.get_scheduler_config(thresholding=snake_case_ , dynamic_thresholding_ratio=0 )
_lowerCAmelCase : List[Any] = scheduler_class(**snake_case_ )
_lowerCAmelCase : Optional[int] = 1_0
_lowerCAmelCase : Optional[int] = self.dummy_model()
_lowerCAmelCase : Union[str, Any] = self.dummy_sample_deter.half()
scheduler.set_timesteps(snake_case_ )
for i, t in enumerate(scheduler.timesteps ):
_lowerCAmelCase : Any = model(snake_case_ , snake_case_ )
_lowerCAmelCase : Union[str, Any] = scheduler.step(snake_case_ , snake_case_ , snake_case_ ).prev_sample
        assert sample.dtype == torch.float16
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_groupvit"""] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_groupvit"""] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 309
| 1
|
'''simple docstring'''
from __future__ import annotations
def depth_first_search(possible_board: list[int], diagonal_right_collisions: list[int], diagonal_left_collisions: list[int], boards: list[list[str]], n: int, ) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen.
    row = len(possible_board)
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board] )
return
# We iterate each column in the row to find all possible results in each row
for col in range(_lowerCamelCase ):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) contains no other cell with the same value, because if it does
        # it means there is a vertical collision. Then we apply the two formulas we
        # learned before:
#
# 45º: y - x = b or 45: row - col = b
# 135º: y + x = b or row + col = b.
#
# And we verify if the results of this two formulas not exist in their variables
# respectively. (diagonal_right_collisions, diagonal_left_collisions)
#
        # If any of these are True it means there is a collision, so we continue to the
        # next value in the for loop.
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col], [*diagonal_right_collisions, row - col], [*diagonal_left_collisions, row + col], boards, n, )
def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
# Print all the boards
for board in boards:
for column in board:
            print(column)
print("""""" )
    print(len(boards), """solutions were found.""")
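# Quick sanity check (illustrative): the 4x4 board has exactly 2 solutions and
# the classic 8x8 board has 92.
def _count_solutions(n: int) -> int:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)
    return len(boards)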
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 309
|
'''simple docstring'''
from __future__ import annotations
import time
from math import sqrt
# 1 for manhattan, 0 for euclidean
HEURISTIC = 0
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
TPos = tuple[int, int]
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent, ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.h_cost = self.calculate_heuristic()
        self.f_cost = self.g_cost + self.h_cost
    def calculate_heuristic(self) -> float:
        dx = self.pos_x - self.goal_x
        dy = self.pos_y - self.goal_y
        if HEURISTIC == 1:
            return abs(dx) + abs(dy)
        else:
            return sqrt(dy**2 + dx**2)
    def __lt__(self, other) -> bool:
        return self.f_cost < other.f_cost
class AStar:
    def __init__(self, start: TPos, goal: TPos):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99_999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False
    def search(self) -> list[TPos]:
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        return [self.start.pos]
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent.g_cost + 1, parent, ))
        return successors
    def retrace_path(self, node: Node | None) -> list[TPos]:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalAStar:
    def __init__(self, start: TPos, goal: TPos):
        self.fwd_astar = AStar(start, goal)
        self.bwd_astar = AStar(goal, start)
        self.reached = False
    def search(self) -> list[TPos]:
        while self.fwd_astar.open_nodes or self.bwd_astar.open_nodes:
            self.fwd_astar.open_nodes.sort()
            self.bwd_astar.open_nodes.sort()
            current_fwd_node = self.fwd_astar.open_nodes.pop(0)
            current_bwd_node = self.bwd_astar.open_nodes.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node)
            self.fwd_astar.closed_nodes.append(current_fwd_node)
            self.bwd_astar.closed_nodes.append(current_bwd_node)
            self.fwd_astar.target = current_bwd_node
            self.bwd_astar.target = current_fwd_node
            successors = {
                self.fwd_astar: self.fwd_astar.get_successors(current_fwd_node),
                self.bwd_astar: self.bwd_astar.get_successors(current_bwd_node),
            }
            for astar in [self.fwd_astar, self.bwd_astar]:
                for child_node in successors[astar]:
                    if child_node in astar.closed_nodes:
                        continue
                    if child_node not in astar.open_nodes:
                        astar.open_nodes.append(child_node)
                    else:
                        # retrieve the best current path
                        better_node = astar.open_nodes.pop(
                            astar.open_nodes.index(child_node))
                        if child_node.g_cost < better_node.g_cost:
                            astar.open_nodes.append(child_node)
                        else:
                            astar.open_nodes.append(better_node)
        return [self.fwd_astar.start.pos]
    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> list[TPos]:
        fwd_path = self.fwd_astar.retrace_path(fwd_node)
        bwd_path = self.bwd_astar.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_time = time.time()
    a_star = AStar(init, goal)
    path = a_star.search()
    end_time = time.time() - start_time
    print(F'AStar execution time = {end_time:f} seconds')
    bd_start_time = time.time()
    bidir_astar = BidirectionalAStar(init, goal)
    bd_path = bidir_astar.search()
    bd_end_time = time.time() - bd_start_time
print(F'BidirectionalAStar execution time = {bd_end_time:f} seconds')
| 309
| 1
|
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"""split_dict""" , [
SplitDict(),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=13_37 , num_examples=42 , dataset_name="""my_dataset""" )} ),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=13_37 , num_examples=42 )} ),
SplitDict({"""train""": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    """split_info""", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="""my_dataset""")])
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files.
    split_dict_asdict = asdict(SplitDict({"""train""": split_info}))
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 309
|
'''simple docstring'''
def find_minimum_change(denominations: list[int], value: str) -> list[int]:
    total_value = int(value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first
    for denomination in reversed(denominations):
        # Find denominations
        while int(total_value) >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the chosen denomination
    return answer
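# Note: this greedy strategy is optimal for canonical systems such as Indian
# currency, but not for arbitrary denominations. A quick counterexample:
def _demo_greedy_limitation() -> None:
    # [3, 3] would be shorter, but greedy picks the largest coin first.
    assert find_minimum_change([1, 3, 4], """6""") == [4, 1, 1]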
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = """0"""
if (
input("""Do you want to enter your denominations ? (yY/n): """).strip().lower()
== "y"
):
        n = int(input("""Enter the number of denominations you want to add: """).strip())
for i in range(0, n):
denominations.append(int(input(F'Denomination {i}: ').strip()))
UpperCamelCase_ = input("""Enter the change you want to make in Indian Currency: """).strip()
else:
# All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
        value = input("""Enter the change you want to make: """).strip()
if int(value) == 0 or int(value) < 0:
print("""The total value cannot be zero or negative.""")
else:
print(F'Following is minimal change for {value}: ')
UpperCamelCase_ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=""" """)
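
# --- Illustrative sketch (not part of the original file) ---
# A self-contained restatement of the greedy change-making routine above, with
# hypothetical names chosen for clarity. Greedy selection is optimal for
# canonical coin systems such as the Indian denominations used here.
def _find_minimum_change_demo(denominations: list[int], total_value: int) -> list[int]:
    answer = []
    for denomination in sorted(denominations, reverse=True):  # largest coin first
        while total_value >= denomination:
            total_value -= denomination
            answer.append(denomination)
    return answer

assert _find_minimum_change_demo([1, 2, 5, 10, 20, 50, 100, 500, 2000], 987) == [
    500, 100, 100, 100, 100, 50, 20, 10, 5, 2
]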
| 309
| 1
|
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : List[str] , _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=False ) -> Optional[Any]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a PyTorch model in Flax, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
if not is_sharded:
_lowerCAmelCase : List[Any] = os.path.abspath(_lowerCamelCase )
logger.info(f'Loading PyTorch weights from {pt_path}' )
_lowerCAmelCase : Any = torch.load(_lowerCamelCase , map_location="""cpu""" )
logger.info(f'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
_lowerCAmelCase : List[Any] = convert_pytorch_state_dict_to_flax(_lowerCamelCase , _lowerCamelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
_lowerCAmelCase : List[str] = convert_pytorch_sharded_state_dict_to_flax(_lowerCamelCase , _lowerCamelCase )
return flax_state_dict
def _UpperCAmelCase ( _lowerCamelCase : Tuple[str] , _lowerCamelCase : np.ndarray , _lowerCamelCase : Dict[str, jnp.ndarray] , _lowerCamelCase : str , ) -> (Tuple[str], np.ndarray):
def is_key_or_prefix_key_in_dict(_lowerCamelCase : Tuple[str] ) -> bool:
return len(set(_lowerCamelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
_lowerCAmelCase : Any = pt_tuple_key[:-1] + ("""scale""",)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
_lowerCAmelCase : Optional[Any] = pt_tuple_key[:-1] + ("""mean""",)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
_lowerCAmelCase : List[Any] = pt_tuple_key[:-1] + ("""var""",)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
_lowerCAmelCase : int = pt_tuple_key[:-1] + ("""embedding""",)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(_lowerCamelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
_lowerCAmelCase : Dict = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
_lowerCAmelCase : List[Any] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
_lowerCAmelCase : Optional[int] = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(_lowerCamelCase ):
_lowerCAmelCase : Union[str, Any] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
_lowerCAmelCase : Union[str, Any] = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
_lowerCAmelCase : List[str] = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
_lowerCAmelCase : Any = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
_lowerCAmelCase : Tuple = pt_tuple_key[-2] + """_g"""
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
_lowerCAmelCase : Tuple = pt_tuple_key[-2] + """_v"""
if name is not None:
_lowerCAmelCase : Optional[int] = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def _UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str ) -> Tuple:
# convert pytorch tensor to numpy
_lowerCAmelCase : Dict = {k: v.numpy() for k, v in pt_state_dict.items()}
_lowerCAmelCase : Optional[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
_lowerCAmelCase : Optional[int] = flax_model.params["""params"""]
else:
_lowerCAmelCase : List[str] = flax_model.params
_lowerCAmelCase : Any = flatten_dict(_lowerCamelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_lowerCAmelCase : int = flatten_dict(flax_model.params["""batch_stats"""] )
random_flax_state_dict.update(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = {}
_lowerCAmelCase : List[Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
_lowerCAmelCase : Any = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_lowerCAmelCase : Any = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
_lowerCAmelCase : int = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_lowerCAmelCase : List[str] = pt_tuple_key[1:]
# Correctly rename weight parameters
_lowerCAmelCase , _lowerCAmelCase : int = rename_key_and_reshape_tensor(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# add model prefix if necessary
_lowerCAmelCase : Optional[Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_lowerCAmelCase : Tuple = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
_lowerCAmelCase : List[str] = jnp.asarray(_lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
_lowerCAmelCase : int = jnp.asarray(_lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
_lowerCAmelCase : int = jnp.asarray(_lowerCamelCase )
return unflatten_dict(_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Any , _lowerCamelCase : Optional[int] ) -> Dict:
import torch
# Load the index
_lowerCAmelCase : Tuple = {}
for shard_file in shard_filenames:
        # load each PyTorch shard with torch.load
_lowerCAmelCase : int = torch.load(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
_lowerCAmelCase : List[str] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
_lowerCAmelCase : Optional[Any] = flax_model.params["""params"""]
_lowerCAmelCase : Optional[int] = flatten_dict(_lowerCamelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params["""batch_stats"""] ) )
else:
_lowerCAmelCase : Optional[int] = flax_model.params
_lowerCAmelCase : Any = flatten_dict(_lowerCamelCase )
_lowerCAmelCase : Optional[Any] = (model_prefix not in flax_model_params) and (
model_prefix in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
_lowerCAmelCase : Union[str, Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split(""".""" )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
_lowerCAmelCase : List[str] = tuple(pt_key.split(""".""" ) )
# remove base model prefix if necessary
_lowerCAmelCase : str = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
_lowerCAmelCase : Tuple = pt_tuple_key[1:]
# Correctly rename weight parameters
_lowerCAmelCase , _lowerCAmelCase : List[Any] = rename_key_and_reshape_tensor(
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# add model prefix if necessary
_lowerCAmelCase : Tuple = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
_lowerCAmelCase : Any = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
_lowerCAmelCase : str = jnp.asarray(_lowerCamelCase )
continue
if "var" in flax_key[-1]:
_lowerCAmelCase : Optional[Any] = jnp.asarray(_lowerCamelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(_lowerCamelCase , _lowerCamelCase )
continue
# also add unexpected weight so that warning is thrown
_lowerCAmelCase : Tuple = jnp.asarray(_lowerCamelCase )
else:
# also add unexpected weight so that warning is thrown
_lowerCAmelCase : Any = jnp.asarray(_lowerCamelCase )
return unflatten_dict(_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Any , _lowerCamelCase : List[Any] ) -> List[str]:
_lowerCAmelCase : Dict = os.path.abspath(_lowerCamelCase )
logger.info(f'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
_lowerCAmelCase : Optional[Any] = getattr(_lowerCamelCase , """Flax""" + model.__class__.__name__ )
# load flax weight dict
with open(_lowerCamelCase , """rb""" ) as state_f:
try:
_lowerCAmelCase : Tuple = from_bytes(_lowerCamelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(_lowerCamelCase , _lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : int ) -> Union[str, Any]:
try:
import torch # noqa: F401
except ImportError:
logger.error(
"""Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see"""
""" https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation"""
""" instructions.""" )
raise
# check if we have bf16 weights
    _lowerCAmelCase : Tuple = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloat16 , _lowerCamelCase ) ).values()
if any(_lowerCamelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
        # and bf16 is not fully supported in PT yet.
logger.warning(
"""Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` """
"""before loading those in PyTorch model.""" )
        _lowerCAmelCase : Tuple = jax.tree_util.tree_map(
            lambda params : params.astype(np.float32 ) if params.dtype == jnp.bfloat16 else params , _lowerCamelCase )
_lowerCAmelCase : Any = flatten_dict(_lowerCamelCase )
_lowerCAmelCase : str = pt_model.state_dict()
_lowerCAmelCase : Optional[Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
_lowerCAmelCase : Optional[Any] = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split(""".""" )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
_lowerCAmelCase : Union[str, Any] = []
_lowerCAmelCase : List[Any] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
_lowerCAmelCase : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix
_lowerCAmelCase : List[Any] = """.""".join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
_lowerCAmelCase : Tuple = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
_lowerCAmelCase : str = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(_lowerCamelCase ) not in pt_model_dict:
# conv layer
_lowerCAmelCase : List[str] = flax_key_tuple[:-1] + ("""weight""",)
_lowerCAmelCase : Tuple = jnp.transpose(_lowerCamelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(_lowerCamelCase ) not in pt_model_dict:
# linear layer
_lowerCAmelCase : str = flax_key_tuple[:-1] + ("""weight""",)
_lowerCAmelCase : int = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
_lowerCAmelCase : str = flax_key_tuple[:-1] + ("""weight""",)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
_lowerCAmelCase : Optional[int] = flax_key_tuple[:-1] + ("""running_mean""",)
elif "var" in flax_key_tuple[-1]:
_lowerCAmelCase : List[str] = flax_key_tuple[:-1] + ("""running_var""",)
if "batch_stats" in flax_state:
_lowerCAmelCase : Tuple = """.""".join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
_lowerCAmelCase : Optional[Any] = """.""".join(_lowerCamelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
_lowerCAmelCase : Any = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
_lowerCAmelCase : Dict = key.split(""".""" )
_lowerCAmelCase : Any = None
if key_components[-3::2] == ["parametrizations", "original0"]:
_lowerCAmelCase : Tuple = key_components[-2] + """_g"""
elif key_components[-3::2] == ["parametrizations", "original1"]:
_lowerCAmelCase : Optional[Any] = key_components[-2] + """_v"""
if name is not None:
_lowerCAmelCase : Tuple = key_components[:-3] + [name]
_lowerCAmelCase : Dict = """.""".join(_lowerCamelCase )
_lowerCAmelCase : int = key
if flax_key in special_pt_names:
_lowerCAmelCase : List[Any] = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
_lowerCAmelCase : List[Any] = np.asarray(_lowerCamelCase ) if not isinstance(_lowerCamelCase , np.ndarray ) else flax_tensor
_lowerCAmelCase : str = torch.from_numpy(_lowerCamelCase )
# remove from missing keys
missing_keys.remove(_lowerCamelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(_lowerCamelCase )
pt_model.load_state_dict(_lowerCamelCase )
# re-transform missing_keys to list
_lowerCAmelCase : Dict = list(_lowerCamelCase )
if len(_lowerCamelCase ) > 0:
logger.warning(
"""Some weights of the Flax model were not used when initializing the PyTorch model"""
f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
""" (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This"""
f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
""" to be exactly identical (e.g. initializing a BertForSequenceClassification model from a"""
""" FlaxBertForSequenceClassification model).""" )
else:
logger.warning(f'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(_lowerCamelCase ) > 0:
logger.warning(
f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
""" use it for predictions and inference.""" )
else:
logger.warning(
f'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
"""If your task is similar to the task the model of the checkpoint was trained on, """
f'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
return pt_model
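
# --- Illustrative sketch (not part of the original module) ---
# The core renaming/reshaping convention the conversion above relies on, shown
# in isolation with numpy only: a PyTorch ``nn.Linear`` weight is stored as
# (out_features, in_features), while the equivalent Flax ``Dense`` kernel is
# (in_features, out_features); conv weights likewise go from (O, I, H, W) to
# (H, W, I, O). Key names and shapes here are made-up examples.
def _pt_to_flax_demo(pt_key: tuple, pt_tensor: np.ndarray) -> tuple:
    if pt_key[-1] == "weight" and pt_tensor.ndim == 4:  # conv layer
        return pt_key[:-1] + ("kernel",), pt_tensor.transpose(2, 3, 1, 0)
    if pt_key[-1] == "weight":  # linear layer
        return pt_key[:-1] + ("kernel",), pt_tensor.T
    return pt_key, pt_tensor

_key, _kernel = _pt_to_flax_demo(("encoder", "dense", "weight"), np.zeros((768, 3072)))
assert _key == ("encoder", "dense", "kernel") and _kernel.shape == (3072, 768)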
| 309
|
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase_ = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
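
# --- Illustrative sketch (not part of the original file) ---
# The lazy-import pattern above in miniature (PEP 562 module-level
# __getattr__): heavy backends are imported only when one of their classes is
# first requested, which is what ``_LazyModule`` automates for the library.
#
#     import importlib
#
#     _import_structure = {"modeling_encoder_decoder": ["EncoderDecoderModel"]}
#
#     def __getattr__(name):  # called only for attributes not found normally
#         for submodule, names in _import_structure.items():
#             if name in names:
#                 return getattr(importlib.import_module(f".{submodule}", __name__), name)
#         raise AttributeError(name)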
| 309
| 1
|
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""google/fnet-base""": """https://huggingface.co/google/fnet-base/resolve/main/config.json""",
"""google/fnet-large""": """https://huggingface.co/google/fnet-large/resolve/main/config.json"""
# See all FNet models at https://huggingface.co/models?filter=fnet
}
class a_ (_a ):
__lowerCAmelCase : List[str] = """fnet"""
def __init__( self , snake_case_=3_2_0_0_0 , snake_case_=7_6_8 , snake_case_=1_2 , snake_case_=3_0_7_2 , snake_case_="gelu_new" , snake_case_=0.1 , snake_case_=5_1_2 , snake_case_=4 , snake_case_=0.02 , snake_case_=1E-12 , snake_case_=False , snake_case_=5_1_2 , snake_case_=3 , snake_case_=1 , snake_case_=2 , **snake_case_ , ):
super().__init__(pad_token_id=snake_case_ , bos_token_id=snake_case_ , eos_token_id=snake_case_ , **snake_case_ )
_lowerCAmelCase : str = vocab_size
_lowerCAmelCase : List[str] = max_position_embeddings
_lowerCAmelCase : Union[str, Any] = hidden_size
_lowerCAmelCase : Any = num_hidden_layers
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : Tuple = hidden_act
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : int = initializer_range
_lowerCAmelCase : List[Any] = type_vocab_size
_lowerCAmelCase : str = layer_norm_eps
_lowerCAmelCase : Any = use_tpu_fourier_optimizations
_lowerCAmelCase : Optional[int] = tpu_short_seq_length
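
# --- Illustrative usage sketch (not part of the original file) ---
# Instantiating the configuration class above (named ``FNetConfig`` in the
# upstream library) with its defaults; every keyword mirrors an assignment in
# ``__init__``:
#
#     config = FNetConfig(vocab_size=32000, hidden_size=768, num_hidden_layers=12)
#     assert config.model_type == "fnet" and config.hidden_size == 768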
| 309
|
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import ClapFeatureExtractor, ClapProcessor, RobertaTokenizer, RobertaTokenizerFast
from transformers.testing_utils import require_sentencepiece, require_torchaudio
from .test_feature_extraction_clap import floats_list
@require_torchaudio
@require_sentencepiece
class a_ (unittest.TestCase ):
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = """laion/clap-htsat-unfused"""
_lowerCAmelCase : int = tempfile.mkdtemp()
def __UpperCamelCase ( self , **snake_case_ ):
return RobertaTokenizer.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self , **snake_case_ ):
return ClapFeatureExtractor.from_pretrained(self.checkpoint , **snake_case_ )
def __UpperCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = self.get_feature_extractor()
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Any = ClapProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = ClapProcessor(tokenizer=self.get_tokenizer() , feature_extractor=self.get_feature_extractor() )
processor.save_pretrained(self.tmpdirname )
_lowerCAmelCase : Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
_lowerCAmelCase : int = self.get_feature_extractor(do_normalize=snake_case_ , padding_value=1.0 )
_lowerCAmelCase : Dict = ClapProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , snake_case_ )
self.assertEqual(processor.feature_extractor.to_json_string() , feature_extractor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.feature_extractor , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : Optional[int] = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = floats_list((3, 1_0_0_0) )
_lowerCAmelCase : List[str] = feature_extractor(snake_case_ , return_tensors="""np""" )
_lowerCAmelCase : Optional[Any] = processor(audios=snake_case_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __UpperCamelCase ( self ):
_lowerCAmelCase : int = self.get_feature_extractor()
_lowerCAmelCase : List[str] = self.get_tokenizer()
_lowerCAmelCase : Tuple = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Union[str, Any] = """This is a test string"""
_lowerCAmelCase : Union[str, Any] = processor(text=snake_case_ )
_lowerCAmelCase : Optional[int] = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Dict = self.get_feature_extractor()
_lowerCAmelCase : Any = self.get_tokenizer()
_lowerCAmelCase : List[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
_lowerCAmelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_lowerCAmelCase : List[Any] = processor.batch_decode(snake_case_ )
_lowerCAmelCase : Dict = tokenizer.batch_decode(snake_case_ )
self.assertListEqual(snake_case_ , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.get_feature_extractor()
_lowerCAmelCase : Dict = self.get_tokenizer()
_lowerCAmelCase : Optional[Any] = ClapProcessor(tokenizer=snake_case_ , feature_extractor=snake_case_ )
self.assertListEqual(
processor.model_input_names[2:] , feature_extractor.model_input_names , msg="""`processor` and `feature_extractor` model input names do not match""" , )
| 309
| 1
|
'''simple docstring'''
from collections.abc import Callable
def _UpperCAmelCase ( _lowerCamelCase : Callable[[float], float] , _lowerCamelCase : float , _lowerCamelCase : float ) -> float:
_lowerCAmelCase : float = a
_lowerCAmelCase : float = b
    if function(_lowerCamelCase ) == 0:  # a is already a root of the function
        return a
    elif function(_lowerCamelCase ) == 0:  # b is already a root of the function
        return b
    elif (
        function(_lowerCamelCase ) * function(_lowerCamelCase ) > 0
    ):  # if neither endpoint is a root and f(a) and f(b) share the same sign,
        # then bisection cannot bracket a root in this interval
        raise ValueError("""could not find root in given interval.""" )
else:
_lowerCAmelCase : float = start + (end - start) / 2.0
        while abs(start - mid ) > 10**-7:  # iterate until the bracketing interval is narrower than 10^-7
if function(_lowerCamelCase ) == 0:
return mid
elif function(_lowerCamelCase ) * function(_lowerCamelCase ) < 0:
_lowerCAmelCase : Optional[Any] = mid
else:
_lowerCAmelCase : str = mid
_lowerCAmelCase : List[str] = start + (end - start) / 2.0
return mid
def _UpperCAmelCase ( _lowerCamelCase : float ) -> float:
return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 10_00))
import doctest
doctest.testmod()
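
# --- Illustrative sketch (not part of the original file) ---
# A self-contained restatement of the bisection idea above, applied to
# f(x) = x**3 - 2*x - 5 (real root near 2.0945515); names are hypothetical.
def _bisection_demo(f, a: float, b: float, tol: float = 1e-7) -> float:
    if f(a) * f(b) > 0:  # the interval must bracket a sign change
        raise ValueError("could not find root in given interval.")
    while (b - a) / 2.0 > tol:
        mid = (a + b) / 2.0
        if f(a) * f(mid) <= 0:  # root lies in the left half
            b = mid
        else:  # root lies in the right half
            a = mid
    return (a + b) / 2.0

_root = _bisection_demo(lambda x: x**3 - 2 * x - 5, 1.0, 1000.0)
assert abs(_root**3 - 2 * _root - 5) < 1e-4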
| 309
|
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = """▁"""
UpperCamelCase_ = {
"""vocab_file""": """vocab.json""",
"""spm_file""": """sentencepiece.bpe.model""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
UpperCamelCase_ = {
"""vocab_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json""",
},
"""spm_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model""",
},
"""tokenizer_config_file""": {
"""facebook/m2m100_418M""": """https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json""",
"""facebook/m2m100_1.2B""": """https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json""",
},
}
UpperCamelCase_ = {
"""facebook/m2m100_418M""": 10_24,
}
# fmt: off
UpperCamelCase_ = {
"""m2m100""": ["""af""", """am""", """ar""", """ast""", """az""", """ba""", """be""", """bg""", """bn""", """br""", """bs""", """ca""", """ceb""", """cs""", """cy""", """da""", """de""", """el""", """en""", """es""", """et""", """fa""", """ff""", """fi""", """fr""", """fy""", """ga""", """gd""", """gl""", """gu""", """ha""", """he""", """hi""", """hr""", """ht""", """hu""", """hy""", """id""", """ig""", """ilo""", """is""", """it""", """ja""", """jv""", """ka""", """kk""", """km""", """kn""", """ko""", """lb""", """lg""", """ln""", """lo""", """lt""", """lv""", """mg""", """mk""", """ml""", """mn""", """mr""", """ms""", """my""", """ne""", """nl""", """no""", """ns""", """oc""", """or""", """pa""", """pl""", """ps""", """pt""", """ro""", """ru""", """sd""", """si""", """sk""", """sl""", """so""", """sq""", """sr""", """ss""", """su""", """sv""", """sw""", """ta""", """th""", """tl""", """tn""", """tr""", """uk""", """ur""", """uz""", """vi""", """wo""", """xh""", """yi""", """yo""", """zh""", """zu"""],
"""wmt21""": ["""en""", """ha""", """is""", """ja""", """cs""", """ru""", """zh""", """de"""]
}
class a_ (_a ):
__lowerCAmelCase : Optional[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : Dict = ["""input_ids""", """attention_mask"""]
__lowerCAmelCase : List[int] = []
__lowerCAmelCase : List[int] = []
def __init__( self , snake_case_ , snake_case_ , snake_case_=None , snake_case_=None , snake_case_="<s>" , snake_case_="</s>" , snake_case_="</s>" , snake_case_="<pad>" , snake_case_="<unk>" , snake_case_="m2m100" , snake_case_ = None , snake_case_=8 , **snake_case_ , ):
_lowerCAmelCase : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
_lowerCAmelCase : Optional[Any] = language_codes
_lowerCAmelCase : Tuple = FAIRSEQ_LANGUAGE_CODES[language_codes]
_lowerCAmelCase : str = {lang_code: f'__{lang_code}__' for lang_code in fairseq_language_code}
_lowerCAmelCase : int = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(snake_case_ )
for lang_code in fairseq_language_code
if self.get_lang_token(snake_case_ ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=snake_case_ , tgt_lang=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , sep_token=snake_case_ , unk_token=snake_case_ , pad_token=snake_case_ , language_codes=snake_case_ , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Optional[int] = vocab_file
_lowerCAmelCase : Any = load_json(snake_case_ )
_lowerCAmelCase : str = {v: k for k, v in self.encoder.items()}
_lowerCAmelCase : Union[str, Any] = spm_file
_lowerCAmelCase : Tuple = load_spm(snake_case_ , self.sp_model_kwargs )
_lowerCAmelCase : int = len(self.encoder )
_lowerCAmelCase : Union[str, Any] = {
self.get_lang_token(snake_case_ ): self.encoder_size + i for i, lang_code in enumerate(snake_case_ )
}
_lowerCAmelCase : List[str] = {lang_code: self.encoder_size + i for i, lang_code in enumerate(snake_case_ )}
_lowerCAmelCase : Optional[Any] = {v: k for k, v in self.lang_token_to_id.items()}
_lowerCAmelCase : Any = src_lang if src_lang is not None else """en"""
_lowerCAmelCase : Optional[int] = tgt_lang
_lowerCAmelCase : Tuple = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_lowerCAmelCase : List[Any] = num_madeup_words
@property
def __UpperCamelCase ( self ):
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def __UpperCamelCase ( self ):
return self._src_lang
@src_lang.setter
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def __UpperCamelCase ( self , snake_case_ ):
return self.sp_model.encode(snake_case_ , out_type=snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(snake_case_ , self.encoder[self.unk_token] )
def __UpperCamelCase ( self , snake_case_ ):
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(snake_case_ , self.unk_token )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = []
_lowerCAmelCase : Optional[int] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(snake_case_ ) + token
_lowerCAmelCase : Optional[Any] = []
else:
current_sub_tokens.append(snake_case_ )
out_string += self.sp_model.decode(snake_case_ )
return out_string.strip()
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case_ , token_ids_a=snake_case_ , already_has_special_tokens=snake_case_ )
_lowerCAmelCase : List[Any] = [1] * len(self.prefix_tokens )
_lowerCAmelCase : Dict = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(snake_case_ )) + suffix_ones
return prefix_ones + ([0] * len(snake_case_ )) + ([0] * len(snake_case_ )) + suffix_ones
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = {self.convert_ids_to_tokens(snake_case_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ):
_lowerCAmelCase : int = self.__dict__.copy()
_lowerCAmelCase : str = None
return state
def __setstate__( self , snake_case_ ):
_lowerCAmelCase : List[str] = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_lowerCAmelCase : str = {}
_lowerCAmelCase : str = load_spm(self.spm_file , self.sp_model_kwargs )
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None ):
_lowerCAmelCase : Dict = Path(snake_case_ )
if not save_dir.is_dir():
raise OSError(f'{save_directory} should be a directory' )
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""vocab_file"""]
)
_lowerCAmelCase : Any = save_dir / (
(filename_prefix + """-""" if filename_prefix else """""") + self.vocab_files_names["""spm_file"""]
)
save_json(self.encoder , snake_case_ )
if os.path.abspath(self.spm_file ) != os.path.abspath(snake_case_ ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , snake_case_ )
elif not os.path.isfile(self.spm_file ):
with open(snake_case_ , """wb""" ) as fi:
_lowerCAmelCase : List[str] = self.sp_model.serialized_model_proto()
fi.write(snake_case_ )
return (str(snake_case_ ), str(snake_case_ ))
def __UpperCamelCase ( self , snake_case_ , snake_case_ = "en" , snake_case_ = None , snake_case_ = "ro" , **snake_case_ , ):
_lowerCAmelCase : Union[str, Any] = src_lang
_lowerCAmelCase : Optional[Any] = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(snake_case_ , snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ , **snake_case_ ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
_lowerCAmelCase : Dict = src_lang
_lowerCAmelCase : str = self(snake_case_ , add_special_tokens=snake_case_ , **snake_case_ )
_lowerCAmelCase : Union[str, Any] = self.get_lang_id(snake_case_ )
_lowerCAmelCase : Tuple = tgt_lang_id
return inputs
def __UpperCamelCase ( self ):
self.set_src_lang_special_tokens(self.src_lang )
def __UpperCamelCase ( self ):
self.set_tgt_lang_special_tokens(self.tgt_lang )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = self.get_lang_token(snake_case_ )
_lowerCAmelCase : List[Any] = self.lang_token_to_id[lang_token]
_lowerCAmelCase : Any = [self.cur_lang_id]
_lowerCAmelCase : Any = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Any = self.get_lang_token(snake_case_ )
_lowerCAmelCase : int = self.lang_token_to_id[lang_token]
_lowerCAmelCase : str = [self.cur_lang_id]
_lowerCAmelCase : str = [self.eos_token_id]
def __UpperCamelCase ( self , snake_case_ ):
return self.lang_code_to_token[lang]
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : List[str] = self.get_lang_token(snake_case_ )
return self.lang_token_to_id[lang_token]
def _UpperCAmelCase ( _lowerCamelCase : str , _lowerCamelCase : Dict[str, Any] ) -> sentencepiece.SentencePieceProcessor:
_lowerCAmelCase : Optional[Any] = sentencepiece.SentencePieceProcessor(**_lowerCamelCase )
spm.Load(str(_lowerCamelCase ) )
return spm
def _UpperCAmelCase ( _lowerCamelCase : str ) -> Union[Dict, List]:
with open(_lowerCamelCase , """r""" ) as f:
return json.load(_lowerCamelCase )
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : str ) -> None:
with open(_lowerCamelCase , """w""" ) as f:
json.dump(_lowerCamelCase , _lowerCamelCase , indent=2 )
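
# --- Illustrative sketch (not part of the original file) ---
# The language-token layout the tokenizer above builds: every language code is
# wrapped as "__<code>__" and appended after the base vocabulary. A toy
# version with a made-up three-language setup and a pretend vocab size of 10:
_fairseq_language_code = ["en", "fr", "de"]
_encoder_size = 10  # pretend size of the SentencePiece/json vocabulary
_lang_token_to_id = {
    f"__{code}__": _encoder_size + i for i, code in enumerate(_fairseq_language_code)
}
assert _lang_token_to_id["__de__"] == 12  # ids continue right after the vocab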
| 309
| 1
|
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class a_ :
def __init__( self , snake_case_ , snake_case_=None , snake_case_=None , snake_case_=None , snake_case_="resnet50" , snake_case_=3 , snake_case_=3_2 , snake_case_=3 , snake_case_=True , snake_case_=True , ):
_lowerCAmelCase : Optional[int] = parent
_lowerCAmelCase : Tuple = out_indices if out_indices is not None else [4]
_lowerCAmelCase : Union[str, Any] = stage_names
_lowerCAmelCase : List[str] = out_features
_lowerCAmelCase : Union[str, Any] = backbone
_lowerCAmelCase : Tuple = batch_size
_lowerCAmelCase : List[str] = image_size
_lowerCAmelCase : Tuple = num_channels
_lowerCAmelCase : Optional[Any] = use_pretrained_backbone
_lowerCAmelCase : int = is_training
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = self.get_config()
return config, pixel_values
def __UpperCamelCase ( self ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[Any] = TimmBackbone(config=snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
_lowerCAmelCase : Union[str, Any] = model(snake_case_ )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = config_and_inputs
_lowerCAmelCase : Optional[Any] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class a_ (_a , _a , _a , unittest.TestCase ):
__lowerCAmelCase : Union[str, Any] = (TimmBackbone,) if is_torch_available() else ()
__lowerCAmelCase : Union[str, Any] = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : Union[str, Any] = False
__lowerCAmelCase : str = False
__lowerCAmelCase : List[str] = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = TimmBackboneModelTester(self )
_lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ )
def __UpperCamelCase ( self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = """resnet18"""
_lowerCAmelCase : int = """microsoft/resnet-18"""
_lowerCAmelCase : Union[str, Any] = AutoBackbone.from_pretrained(snake_case_ , use_timm_backbone=snake_case_ )
_lowerCAmelCase : Union[str, Any] = AutoBackbone.from_pretrained(snake_case_ )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
_lowerCAmelCase : Optional[Any] = AutoBackbone.from_pretrained(snake_case_ , use_timm_backbone=snake_case_ , out_indices=[1, 2, 3] )
_lowerCAmelCase : Optional[Any] = AutoBackbone.from_pretrained(snake_case_ , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("""TimmBackbone doesn't support feed forward chunking""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""TimmBackbone doesn't have num_hidden_layers attribute""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""TimmBackbone models doesn't have inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""model weights aren't tied in TimmBackbone.""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""TimmBackbone doesn't have hidden size info in its configuration.""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""TimmBackbone doesn't support output_attentions.""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def __UpperCamelCase ( self ):
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Dict = model_class(snake_case_ )
_lowerCAmelCase : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Tuple = [*signature.parameters.keys()]
_lowerCAmelCase : List[str] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCAmelCase : int = True
_lowerCAmelCase : Any = self.has_attentions
# no need to test all models as different heads yield the same functionality
_lowerCAmelCase : Tuple = self.all_model_classes[0]
_lowerCAmelCase : List[str] = model_class(snake_case_ )
model.to(snake_case_ )
_lowerCAmelCase : List[str] = self._prepare_for_class(snake_case_ , snake_case_ )
_lowerCAmelCase : List[Any] = model(**snake_case_ )
_lowerCAmelCase : int = outputs[0][-1]
# Encoder-/Decoder-only models
_lowerCAmelCase : str = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
_lowerCAmelCase : Optional[Any] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=snake_case_ )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[Any] = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Union[str, Any] = model(**snake_case_ )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
_lowerCAmelCase : Dict = copy.deepcopy(snake_case_ )
_lowerCAmelCase : Dict = None
_lowerCAmelCase : int = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Any = model(**snake_case_ )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
_lowerCAmelCase : str = copy.deepcopy(snake_case_ )
_lowerCAmelCase : Union[str, Any] = False
_lowerCAmelCase : Union[str, Any] = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[int] = model(**snake_case_ )
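
# --- Illustrative sketch (not part of the original tests) ---
# The default-out-indices equivalence checked above, in isolation: timm
# backbones default to out_indices=(-1,) while transformers backbones use
# [len(stage_names) - 1]; both resolve to the final stage.
_stage_names = ["stem", "stage1", "stage2", "stage3", "stage4"]  # made-up names
_timm_default, _transformers_default = (-1,), [len(_stage_names) - 1]
assert _stage_names[_timm_default[0]] == _stage_names[_transformers_default[0]]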
| 309
|
'''simple docstring'''
from collections.abc import Callable
import numpy as np
def _UpperCAmelCase ( _lowerCamelCase : Callable , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float , _lowerCamelCase : float ) -> np.ndarray:
_lowerCAmelCase : Union[str, Any] = int(np.ceil((x_end - xa) / step_size ) )
_lowerCAmelCase : Tuple = np.zeros((n + 1,) )
_lowerCAmelCase : List[Any] = ya
_lowerCAmelCase : int = xa
for k in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = y[k] + step_size * ode_func(_lowerCamelCase , y[k] )
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
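
# --- Illustrative sketch (not part of the original file) ---
# Self-contained usage of the explicit (forward) Euler step implemented above:
# integrate y' = y with y(0) = 1 on [0, 1]; the endpoint approaches e as the
# step size shrinks. Euler is first-order, so expect error ~ e*h/2 at h=0.01.
def _explicit_euler_demo(ode_func, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])  # y_{k+1} = y_k + h*f(x_k, y_k)
        x += step_size
    return y

_result = _explicit_euler_demo(lambda x, y: y, 1.0, 0.0, 0.01, 1.0)
assert abs(_result[-1] - np.e) < 0.02  # (1.01)**100 ≈ 2.7048 vs e ≈ 2.7183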
| 309
| 1
|
'''simple docstring'''
import string
import numpy
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : int ) -> int:
return b if a == 0 else greatest_common_divisor(b % a , _lowerCamelCase )
class a_ :
__lowerCAmelCase : Optional[int] = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
    __lowerCAmelCase : Optional[int] = numpy.vectorize(lambda x : x % 36 )
    __lowerCAmelCase : Optional[int] = numpy.vectorize(lambda x : round(x ) )
def __init__( self , snake_case_ ):
_lowerCAmelCase : str = self.modulus(snake_case_ ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
_lowerCAmelCase : Dict = encrypt_key.shape[0]
def __UpperCamelCase ( self , snake_case_ ):
return self.key_string.index(snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
return self.key_string[round(snake_case_ )]
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[Any] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
_lowerCAmelCase : Optional[int] = det % len(self.key_string )
_lowerCAmelCase : Dict = len(self.key_string )
if greatest_common_divisor(snake_case_ , len(self.key_string ) ) != 1:
_lowerCAmelCase : Tuple = (
                f'determinant of the encryption key ({det}) modulo {req_l} '
                f'is not coprime with {req_l}.\nTry another key.'
)
raise ValueError(snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Optional[Any] = [char for char in text.upper() if char in self.key_string]
_lowerCAmelCase : Optional[Any] = chars[-1]
while len(snake_case_ ) % self.break_key != 0:
chars.append(snake_case_ )
return "".join(snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Dict = self.process_text(text.upper() )
_lowerCAmelCase : Tuple = """"""
for i in range(0 , len(snake_case_ ) - self.break_key + 1 , self.break_key ):
_lowerCAmelCase : Union[str, Any] = text[i : i + self.break_key]
_lowerCAmelCase : List[Any] = [self.replace_letters(snake_case_ ) for char in batch]
_lowerCAmelCase : str = numpy.array([vec] ).T
_lowerCAmelCase : Tuple = self.modulus(self.encrypt_key.dot(snake_case_ ) ).T.tolist()[
0
]
_lowerCAmelCase : str = """""".join(
self.replace_digits(snake_case_ ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
_lowerCAmelCase : Dict = det % len(self.key_string )
_lowerCAmelCase : Any = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
_lowerCAmelCase : str = i
break
_lowerCAmelCase : Dict = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(snake_case_ ) )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : int = self.make_decrypt_key()
_lowerCAmelCase : List[str] = self.process_text(text.upper() )
_lowerCAmelCase : List[str] = """"""
for i in range(0 , len(snake_case_ ) - self.break_key + 1 , self.break_key ):
_lowerCAmelCase : str = text[i : i + self.break_key]
_lowerCAmelCase : List[Any] = [self.replace_letters(snake_case_ ) for char in batch]
_lowerCAmelCase : List[str] = numpy.array([vec] ).T
_lowerCAmelCase : str = self.modulus(decrypt_key.dot(snake_case_ ) ).T.tolist()[0]
_lowerCAmelCase : Any = """""".join(
self.replace_digits(snake_case_ ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def _UpperCAmelCase ( ) -> None:
_lowerCAmelCase : Optional[int] = int(input("""Enter the order of the encryption key: """ ) )
_lowerCAmelCase : int = []
print("""Enter each row of the encryption key with space separated integers""" )
for _ in range(_lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = [int(_lowerCamelCase ) for x in input().split()]
hill_matrix.append(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = HillCipher(numpy.array(_lowerCamelCase ) )
print("""Would you like to encrypt or decrypt some text? (1 or 2)""" )
_lowerCAmelCase : str = input("""\n1. Encrypt\n2. Decrypt\n""" )
if option == "1":
_lowerCAmelCase : Optional[int] = input("""What text would you like to encrypt?: """ )
print("""Your encrypted text is:""" )
print(hc.encrypt(_lowerCamelCase ) )
elif option == "2":
_lowerCAmelCase : int = input("""What text would you like to decrypt?: """ )
print("""Your decrypted text is:""" )
print(hc.decrypt(_lowerCamelCase ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
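
# --- Illustrative sketch (not part of the original file) ---
# The decrypt key built above hinges on the modular inverse of the key
# determinant: over an alphabet of size m (36 here), the key matrix is
# invertible iff gcd(det, m) == 1, and det_inv is the i with (det*i) % m == 1.
def _modular_inverse_demo(det: int, m: int = 36) -> int:
    det %= m
    for i in range(1, m):  # brute-force search, as in make_decrypt_key above
        if (det * i) % m == 1:
            return i
    raise ValueError(f"det={det} has no inverse modulo {m}")

assert _modular_inverse_demo(5) == 29  # 5 * 29 = 145 = 4 * 36 + 1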
| 309
|
'''simple docstring'''
from unittest.mock import Mock, patch
from file_transfer.send_file import send_file
@patch("""socket.socket""" )
@patch("""builtins.open""" )
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[int] ) -> Union[str, Any]:
# ===== initialization =====
_lowerCAmelCase : Tuple = Mock()
    sock.return_value.accept.return_value = conn, Mock()  # accept() returns (conn, addr)
_lowerCAmelCase : Optional[Any] = iter([1, None] )
_lowerCAmelCase : str = lambda _lowerCamelCase : next(_lowerCamelCase )
# ===== invoke =====
send_file(filename="""mytext.txt""" , testing=_lowerCamelCase )
    # ===== verification =====
sock.assert_called_once()
sock.return_value.bind.assert_called_once()
sock.return_value.listen.assert_called_once()
sock.return_value.accept.assert_called_once()
conn.recv.assert_called_once()
file.return_value.__enter__.assert_called_once()
file.return_value.__enter__.return_value.read.assert_called()
conn.send.assert_called_once()
conn.close.assert_called_once()
sock.return_value.shutdown.assert_called_once()
sock.return_value.close.assert_called_once()
| 309
| 1
|
'''simple docstring'''
import sys
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] ) -> Any:
_lowerCAmelCase : List[str] = len(_lowerCamelCase )
_lowerCAmelCase : Tuple = [[0 for x in range(_lowerCamelCase )] for x in range(_lowerCamelCase )]
_lowerCAmelCase : Dict = [[0 for x in range(_lowerCamelCase )] for x in range(_lowerCamelCase )]
for chain_length in range(2 , _lowerCamelCase ):
for a in range(1 , n - chain_length + 1 ):
_lowerCAmelCase : Tuple = a + chain_length - 1
_lowerCAmelCase : Tuple = sys.maxsize
for c in range(_lowerCamelCase , _lowerCamelCase ):
_lowerCAmelCase : Optional[Any] = (
matrix[a][c] + matrix[c + 1][b] + array[a - 1] * array[c] * array[b]
)
if cost < matrix[a][b]:
_lowerCAmelCase : Any = cost
_lowerCAmelCase : List[str] = c
return matrix, sol
def _UpperCAmelCase ( _lowerCamelCase : Dict , _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any ) -> Tuple:
if i == j:
print("""A""" + str(_lowerCamelCase ) , end=""" """ )
else:
print("""(""" , end=""" """ )
        print_optimal_solution(_lowerCamelCase , _lowerCamelCase , optimal_solution[i][j] )
        print_optimal_solution(_lowerCamelCase , optimal_solution[i][j] + 1 , _lowerCamelCase )
print(""")""" , end=""" """ )
def _UpperCAmelCase ( ) -> Any:
_lowerCAmelCase : Dict = [30, 35, 15, 5, 10, 20, 25]
_lowerCAmelCase : List[Any] = len(_lowerCamelCase )
# Size of matrix created from above array will be
# 30*35 35*15 15*5 5*10 10*20 20*25
_lowerCAmelCase , _lowerCAmelCase : Tuple = matrix_chain_order(_lowerCamelCase )
print("""No. of Operation required: """ + str(matrix[1][n - 1] ) )
    print_optimal_solution(_lowerCamelCase , 1 , n - 1 )
if __name__ == "__main__":
main()
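
# --- Illustrative sketch (not part of the original file) ---
# A self-contained check of the DP above on the classic CLRS instance used in
# main(): dims [30, 35, 15, 5, 10, 20, 25] describe six matrices, and the
# optimal parenthesization costs 15125 scalar multiplications.
def _matrix_chain_cost_demo(dims: list) -> int:
    n = len(dims)
    m = [[0] * n for _ in range(n)]  # m[a][b]: min cost to multiply A_a..A_b
    for length in range(2, n):
        for a in range(1, n - length + 1):
            b = a + length - 1
            m[a][b] = sys.maxsize
            for c in range(a, b):  # try every split point
                cost = m[a][c] + m[c + 1][b] + dims[a - 1] * dims[c] * dims[b]
                m[a][b] = min(m[a][b], cost)
    return m[1][n - 1]

assert _matrix_chain_cost_demo([30, 35, 15, 5, 10, 20, 25]) == 15125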
| 309
|
'''simple docstring'''
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class a_ :
def __init__( self , snake_case_ , snake_case_=1_3 , snake_case_=3_0 , snake_case_=2 , snake_case_=3 , snake_case_=True , snake_case_=True , snake_case_=3_2 , snake_case_=5 , snake_case_=4 , snake_case_=3_7 , snake_case_="gelu" , snake_case_=0.1 , snake_case_=0.1 , snake_case_=1_0 , snake_case_=0.02 , snake_case_=None , ):
_lowerCAmelCase : Optional[Any] = parent
_lowerCAmelCase : Any = batch_size
_lowerCAmelCase : Tuple = image_size
_lowerCAmelCase : int = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : str = is_training
_lowerCAmelCase : Any = use_labels
_lowerCAmelCase : List[Any] = hidden_size
_lowerCAmelCase : int = num_hidden_layers
_lowerCAmelCase : Dict = num_attention_heads
_lowerCAmelCase : Union[str, Any] = intermediate_size
_lowerCAmelCase : Dict = hidden_act
_lowerCAmelCase : str = hidden_dropout_prob
_lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : str = initializer_range
_lowerCAmelCase : Optional[Any] = scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_lowerCAmelCase : List[Any] = (image_size // patch_size) ** 2
_lowerCAmelCase : Dict = num_patches + 1
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCAmelCase : List[str] = None
if self.use_labels:
_lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __UpperCamelCase ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[Any] = ViTMSNModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[Any] = model(snake_case_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_ ):
_lowerCAmelCase : Tuple = self.type_sequence_label_size
_lowerCAmelCase : int = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : Optional[int] = model(snake_case_ , labels=snake_case_ )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCAmelCase : int = 1
_lowerCAmelCase : List[str] = ViTMSNForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
_lowerCAmelCase : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCAmelCase : Optional[int] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __UpperCamelCase ( self ):
_lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : List[Any] = config_and_inputs
_lowerCAmelCase : Any = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ (_a , _a , unittest.TestCase ):
__lowerCAmelCase : Tuple = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
__lowerCAmelCase : Optional[int] = (
{"""feature-extraction""": ViTMSNModel, """image-classification""": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
__lowerCAmelCase : Dict = False
__lowerCAmelCase : Optional[Any] = False
__lowerCAmelCase : List[str] = False
__lowerCAmelCase : Any = False
def __UpperCamelCase ( self ):
_lowerCAmelCase : Tuple = ViTMSNModelTester(self )
_lowerCAmelCase : int = ConfigTester(self , config_class=snake_case_ , has_text_modality=snake_case_ , hidden_size=3_7 )
def __UpperCamelCase ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def __UpperCamelCase ( self ):
pass
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : List[str] = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_lowerCAmelCase : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def __UpperCamelCase ( self ):
_lowerCAmelCase , _lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCAmelCase : Optional[int] = model_class(snake_case_ )
_lowerCAmelCase : List[str] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCAmelCase : Optional[Any] = [*signature.parameters.keys()]
_lowerCAmelCase : List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@slow
def __UpperCamelCase ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCAmelCase : Optional[int] = ViTMSNModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ (unittest.TestCase ):
@cached_property
def __UpperCamelCase ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def __UpperCamelCase ( self ):
torch.manual_seed(2 )
_lowerCAmelCase : Dict = ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(snake_case_ )
_lowerCAmelCase : Dict = self.default_image_processor
_lowerCAmelCase : Any = prepare_img()
_lowerCAmelCase : List[str] = image_processor(images=snake_case_ , return_tensors="""pt""" ).to(snake_case_ )
# forward pass
with torch.no_grad():
_lowerCAmelCase : Dict = model(**snake_case_ )
# verify the logits
_lowerCAmelCase : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
_lowerCAmelCase : Tuple = torch.tensor([-0.0803, -0.4454, -0.2375] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
'''simple docstring'''
UpperCamelCase_ = {
0: """0""",
1: """1""",
2: """2""",
3: """3""",
4: """4""",
5: """5""",
6: """6""",
7: """7""",
8: """8""",
9: """9""",
10: """a""",
11: """b""",
12: """c""",
13: """d""",
14: """e""",
15: """f""",
}
def _UpperCAmelCase ( _lowerCamelCase : float ) -> str:
assert type(_lowerCamelCase ) in (int, float) and decimal == int(_lowerCamelCase )
_lowerCAmelCase : Any = int(_lowerCamelCase )
_lowerCAmelCase : List[str] = """"""
_lowerCAmelCase : Optional[Any] = False
if decimal < 0:
_lowerCAmelCase : Any = True
decimal *= -1
while decimal > 0:
_lowerCAmelCase , _lowerCAmelCase : Union[str, Any] = divmod(_lowerCamelCase , 16 )
_lowerCAmelCase : Dict = values[remainder] + hexadecimal
_lowerCAmelCase : List[Any] = """0x""" + hexadecimal
if negative:
_lowerCAmelCase : List[str] = """-""" + hexadecimal
return hexadecimal
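# Worked example (illustrative, not part of the original file): converting 5800
# peels off hex digits least-significant first via divmod(decimal, 16):
#   5800 = 16 * 362 + 8   -> '8'
#    362 = 16 *  22 + 10  -> 'a'
#     22 = 16 *   1 + 6   -> '6'
#      1 = 16 *   0 + 1   -> '1'
# Each remainder is prepended, giving "0x16a8" (with a leading "-" if the
# input was negative).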
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class a_ (_a ):
__lowerCAmelCase : List[Any] = """microsoft/speecht5_tts"""
__lowerCAmelCase : List[Any] = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
__lowerCAmelCase : List[str] = """text_reader"""
__lowerCAmelCase : Optional[Any] = SpeechTaProcessor
__lowerCAmelCase : str = SpeechTaForTextToSpeech
__lowerCAmelCase : int = SpeechTaHifiGan
__lowerCAmelCase : int = ["""text"""]
__lowerCAmelCase : int = ["""audio"""]
def __UpperCamelCase ( self ):
if self.post_processor is None:
_lowerCAmelCase : int = """microsoft/speecht5_hifigan"""
super().setup()
def __UpperCamelCase ( self , snake_case_ , snake_case_=None ):
_lowerCAmelCase : Tuple = self.pre_processor(text=snake_case_ , return_tensors="""pt""" , truncation=snake_case_ )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
_lowerCAmelCase : List[str] = load_dataset("""Matthijs/cmu-arctic-xvectors""" , split="""validation""" )
_lowerCAmelCase : Any = torch.tensor(embeddings_dataset[7_3_0_5]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.model.generate_speech(**snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
with torch.no_grad():
return self.post_processor(snake_case_ ).cpu().detach()
'''simple docstring'''
from typing import List
import jiwer
import jiwer.transforms as tr
from packaging import version
import datasets
from datasets.config import PY_VERSION
if PY_VERSION < version.parse("""3.8"""):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
UpperCamelCase_ = """"""
if version.parse(importlib_metadata.version("""jiwer""")) < version.parse("""2.3.0"""):
class a_ (tr.AbstractTransform ):
def __init__( self , snake_case_ = " " ):
_lowerCAmelCase : List[Any] = sentence_delimiter
def __UpperCamelCase ( self , snake_case_ ):
return list(snake_case_ )
def __UpperCamelCase ( self , snake_case_ ):
_lowerCAmelCase : Union[str, Any] = []
for sent_idx, sentence in enumerate(snake_case_ ):
chars.extend(self.process_string(snake_case_ ) )
if self.sentence_delimiter is not None and self.sentence_delimiter != "" and sent_idx < len(snake_case_ ) - 1:
chars.append(self.sentence_delimiter )
return chars
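    # Illustrative behaviour (assumption, not from the original file): with the
    # default " " delimiter, process_list(["ab", "cd"]) expands each sentence
    # into characters and inserts the delimiter between sentences, yielding
    # ['a', 'b', ' ', 'c', 'd'].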
UpperCamelCase_ = tr.Compose(
[tr.RemoveMultipleSpaces(), tr.Strip(), SentencesToListOfCharacters(SENTENCE_DELIMITER)]
)
else:
UpperCamelCase_ = tr.Compose(
[
tr.RemoveMultipleSpaces(),
tr.Strip(),
tr.ReduceToSingleSentence(SENTENCE_DELIMITER),
tr.ReduceToListOfListOfChars(),
]
)
UpperCamelCase_ = """\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
"""
UpperCamelCase_ = """\
Character error rate (CER) is a common metric of the performance of an automatic speech recognition system.
CER is similar to Word Error Rate (WER), but operates on characters instead of words. Please refer to the docs of WER for further information.
Character error rate can be computed as:
CER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct characters,
N is the number of characters in the reference (N=S+D+C).
CER's output is not always a number between 0 and 1, in particular when there is a high number of insertions. This value is often associated with the percentage of characters that were incorrectly predicted. The lower the value, the better the
performance of the ASR system, with a CER of 0 being a perfect score.
"""
UpperCamelCase_ = """
Computes CER score of transcribed segments against references.
Args:
references: list of references for each speech input.
    predictions: list of transcriptions to score.
    concatenate_texts: Whether or not to concatenate sentences before evaluation; set to True for a more accurate result.
Returns:
(float): the character error rate
Examples:
>>> predictions = [\"this is the prediction\", \"there is an other sample\"]
>>> references = [\"this is the reference\", \"there is another one\"]
>>> cer = datasets.load_metric(\"cer\")
>>> cer_score = cer.compute(predictions=predictions, references=references)
>>> print(cer_score)
0.34146341463414637
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ (datasets.Metric ):
def __UpperCamelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
"""https://sites.google.com/site/textdigitisation/qualitymeasures/computingerrorrates""",
] , )
def __UpperCamelCase ( self , snake_case_ , snake_case_ , snake_case_=False ):
if concatenate_texts:
return jiwer.compute_measures(
snake_case_ , snake_case_ , truth_transform=snake_case_ , hypothesis_transform=snake_case_ , )["wer"]
_lowerCAmelCase : Dict = 0
_lowerCAmelCase : List[str] = 0
for prediction, reference in zip(snake_case_ , snake_case_ ):
_lowerCAmelCase : Optional[int] = jiwer.compute_measures(
snake_case_ , snake_case_ , truth_transform=snake_case_ , hypothesis_transform=snake_case_ , )
incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
total += measures["substitutions"] + measures["deletions"] + measures["hits"]
return incorrect / total
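        # Worked example (illustrative, not in the original file): for the
        # reference "hello" vs the prediction "helo", jiwer counts S=0
        # substitutions, D=1 deletion, I=0 insertions and C=4 hits over the
        # N=5 reference characters, so CER = (0 + 1 + 0) / (0 + 1 + 4) = 0.2.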
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def _UpperCAmelCase ( ) -> Tuple:
_lowerCAmelCase : List[Any] = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
_lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("""RGB""" )
return image
def _UpperCAmelCase ( _lowerCamelCase : Any ) -> Dict:
_lowerCAmelCase : str = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.weight', f'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm1.bias', f'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.weight', f'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.norm2.bias', f'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.qkv.weight', f'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.weight', f'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((f'visual_encoder.blocks.{i}.attn.proj.bias', f'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.weight', f'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc1.bias', f'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.weight', f'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((f'visual_encoder.blocks.{i}.mlp.fc2.bias', f'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def _UpperCAmelCase ( _lowerCamelCase : List[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[Any] ) -> Optional[Any]:
_lowerCAmelCase : str = dct.pop(_lowerCamelCase )
_lowerCAmelCase : str = val
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple ) -> Tuple:
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
_lowerCAmelCase : Tuple = state_dict.pop(f'visual_encoder.blocks.{i}.attn.q_bias' )
_lowerCAmelCase : Optional[Any] = state_dict.pop(f'visual_encoder.blocks.{i}.attn.v_bias' )
# next, set bias in the state dict
_lowerCAmelCase : int = torch.cat((q_bias, torch.zeros_like(_lowerCamelCase , requires_grad=_lowerCamelCase ), v_bias) )
_lowerCAmelCase : str = qkv_bias
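# Layout note (illustrative sketch, not from the original file): the fused qkv
# projection expects a bias of length 3 * hidden_size ordered [q | k | v], and
# BLIP-2's ViT has no key bias, hence the torch.zeros_like block in the middle:
#   q_bias = torch.ones(4); v_bias = torch.full((4,), 2.0)
#   torch.cat((q_bias, torch.zeros_like(q_bias), v_bias))  # shape (12,)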
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Union[str, Any] ) -> List[Any]:
_lowerCAmelCase : str = 3_64 if """coco""" in model_name else 2_24
_lowerCAmelCase : str = BlipaVisionConfig(image_size=_lowerCamelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
_lowerCAmelCase : int = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "opt-6.7b" in model_name:
_lowerCAmelCase : Union[str, Any] = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=_lowerCamelCase ).to_dict()
elif "t5-xl" in model_name:
_lowerCAmelCase : Optional[int] = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
_lowerCAmelCase : str = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
_lowerCAmelCase : Dict = BlipaConfig(vision_config=_lowerCamelCase , text_config=_lowerCamelCase )
return config, image_size
@torch.no_grad()
def _UpperCAmelCase ( _lowerCamelCase : Tuple , _lowerCamelCase : List[Any]=None , _lowerCamelCase : int=False ) -> List[str]:
_lowerCAmelCase : int = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
_lowerCAmelCase : List[Any] = tokenizer("""\n""" , add_special_tokens=_lowerCamelCase ).input_ids[0]
_lowerCAmelCase , _lowerCAmelCase : List[str] = get_blipa_config(_lowerCamelCase , eos_token_id=_lowerCamelCase )
_lowerCAmelCase : Optional[int] = BlipaForConditionalGeneration(_lowerCamelCase ).eval()
_lowerCAmelCase : Union[str, Any] = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
_lowerCAmelCase , _lowerCAmelCase : List[str] = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
_lowerCAmelCase : Dict = """cuda""" if torch.cuda.is_available() else """cpu"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase : Optional[int] = load_model_and_preprocess(
name=_lowerCamelCase , model_type=_lowerCamelCase , is_eval=_lowerCamelCase , device=_lowerCamelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
_lowerCAmelCase : List[Any] = original_model.state_dict()
_lowerCAmelCase : Optional[int] = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
_lowerCAmelCase : Tuple = state_dict.pop(_lowerCamelCase )
if key.startswith("""Qformer.bert""" ):
_lowerCAmelCase : List[Any] = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
_lowerCAmelCase : Optional[int] = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
_lowerCAmelCase : Dict = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
_lowerCAmelCase : Tuple = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
_lowerCAmelCase : List[Any] = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
_lowerCAmelCase : int = key.replace("""t5""" , """language""" )
_lowerCAmelCase : Tuple = val
# read in qv biases
read_in_q_v_bias(_lowerCamelCase , _lowerCamelCase )
_lowerCAmelCase , _lowerCAmelCase : Optional[int] = hf_model.load_state_dict(_lowerCamelCase , strict=_lowerCamelCase )
assert len(_lowerCamelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
_lowerCAmelCase : Union[str, Any] = load_demo_image()
_lowerCAmelCase : Optional[int] = vis_processors["""eval"""](_lowerCamelCase ).unsqueeze(0 ).to(_lowerCamelCase )
_lowerCAmelCase : List[str] = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
# create processor
_lowerCAmelCase : Optional[int] = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=_lowerCamelCase , image_std=_lowerCamelCase )
_lowerCAmelCase : Tuple = BlipaProcessor(image_processor=_lowerCamelCase , tokenizer=_lowerCamelCase )
_lowerCAmelCase : Any = processor(images=_lowerCamelCase , return_tensors="""pt""" ).pixel_values.to(_lowerCamelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(_lowerCamelCase , _lowerCamelCase )
original_model.to(_lowerCamelCase )
hf_model.to(_lowerCamelCase )
with torch.no_grad():
if "opt" in model_name:
_lowerCAmelCase : Optional[Any] = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
_lowerCAmelCase : Optional[Any] = hf_model(_lowerCamelCase , _lowerCamelCase ).logits
else:
_lowerCAmelCase : List[Any] = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
_lowerCAmelCase : Tuple = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -1_00 )
_lowerCAmelCase : Dict = hf_model(_lowerCamelCase , _lowerCamelCase , labels=_lowerCamelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
_lowerCAmelCase : Any = torch.tensor(
[[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=_lowerCamelCase )
assert torch.allclose(logits[0, :3, :3] , _lowerCamelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
_lowerCAmelCase : List[Any] = torch.tensor(
[[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=_lowerCamelCase )
else:
# cast to same type
_lowerCAmelCase : Union[str, Any] = logits.dtype
assert torch.allclose(original_logits.to(_lowerCamelCase ) , _lowerCamelCase , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
_lowerCAmelCase : Optional[int] = """"""
_lowerCAmelCase : Union[str, Any] = tokenizer(_lowerCamelCase , return_tensors="""pt""" ).input_ids.to(_lowerCamelCase )
_lowerCAmelCase : List[Any] = original_model.generate({"""image""": original_pixel_values} )
_lowerCAmelCase : Dict = hf_model.generate(
_lowerCamelCase , _lowerCamelCase , do_sample=_lowerCamelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , _lowerCamelCase )
_lowerCAmelCase : int = input_ids.shape[1]
_lowerCAmelCase : str = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=_lowerCamelCase )
_lowerCAmelCase : List[str] = [text.strip() for text in output_text]
print("""HF generation:""" , _lowerCamelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(_lowerCamelCase )
hf_model.save_pretrained(_lowerCamelCase )
if push_to_hub:
processor.push_to_hub(f'nielsr/{model_name}' )
hf_model.push_to_hub(f'nielsr/{model_name}' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
UpperCamelCase_ = [
"""blip2-opt-2.7b""",
"""blip2-opt-6.7b""",
"""blip2-opt-2.7b-coco""",
"""blip2-opt-6.7b-coco""",
"""blip2-flan-t5-xl""",
"""blip2-flan-t5-xl-coco""",
"""blip2-flan-t5-xxl""",
]
parser.add_argument(
"""--model_name""",
default="""blip2-opt-2.7b""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
UpperCamelCase_ = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCamelCase_ = {
"""configuration_nezha""": ["""NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """NezhaConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"""NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NezhaForNextSentencePrediction""",
"""NezhaForMaskedLM""",
"""NezhaForPreTraining""",
"""NezhaForMultipleChoice""",
"""NezhaForQuestionAnswering""",
"""NezhaForSequenceClassification""",
"""NezhaForTokenClassification""",
"""NezhaModel""",
"""NezhaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import argparse
import os
import re
UpperCamelCase_ = """src/diffusers"""
# Pattern that looks at the indentation in a line.
UpperCamelCase_ = re.compile(r"""^(\s*)\S""")
# Pattern that matches `"key":" and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\":""")
# Pattern that matches `_import_structure["key"]` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*_import_structure\[\"([^\"]+)\"\]""")
# Pattern that matches `"key",` and puts `key` in group 0.
UpperCamelCase_ = re.compile(r"""^\s*\"([^\"]+)\",\s*$""")
# Pattern that matches any `[stuff]` and puts `stuff` in group 0.
UpperCamelCase_ = re.compile(r"""\[([^\]]+)\]""")
def _UpperCAmelCase ( _lowerCamelCase : List[Any] ) -> str:
_lowerCAmelCase : Dict = _re_indent.search(_lowerCamelCase )
return "" if search is None else search.groups()[0]
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : List[str]="" , _lowerCamelCase : str=None , _lowerCamelCase : List[Any]=None ) -> str:
_lowerCAmelCase : Union[str, Any] = 0
_lowerCAmelCase : Tuple = code.split("""\n""" )
if start_prompt is not None:
while not lines[index].startswith(_lowerCamelCase ):
index += 1
_lowerCAmelCase : List[Any] = ["""\n""".join(lines[:index] )]
else:
_lowerCAmelCase : List[str] = []
# We split into blocks until we get to the `end_prompt` (or the end of the block).
_lowerCAmelCase : Union[str, Any] = [lines[index]]
index += 1
while index < len(_lowerCamelCase ) and (end_prompt is None or not lines[index].startswith(_lowerCamelCase )):
if len(lines[index] ) > 0 and get_indent(lines[index] ) == indent_level:
if len(_lowerCamelCase ) > 0 and get_indent(current_block[-1] ).startswith(indent_level + """ """ ):
current_block.append(lines[index] )
blocks.append("""\n""".join(_lowerCamelCase ) )
if index < len(_lowerCamelCase ) - 1:
_lowerCAmelCase : Union[str, Any] = [lines[index + 1]]
index += 1
else:
_lowerCAmelCase : Dict = []
else:
blocks.append("""\n""".join(_lowerCamelCase ) )
_lowerCAmelCase : Tuple = [lines[index]]
else:
current_block.append(lines[index] )
index += 1
# Adds current block if it's nonempty.
if len(_lowerCamelCase ) > 0:
blocks.append("""\n""".join(_lowerCamelCase ) )
# Add final block after end_prompt if provided.
if end_prompt is not None and index < len(_lowerCamelCase ):
blocks.append("""\n""".join(lines[index:] ) )
return blocks
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] ) -> Any:
def _inner(_lowerCamelCase : Any ):
return key(_lowerCamelCase ).lower().replace("""_""" , """""" )
return _inner
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] , _lowerCamelCase : Tuple=None ) -> Union[str, Any]:
# If no key is provided, we use a noop.
def noop(_lowerCamelCase : List[Any] ):
return x
if key is None:
_lowerCAmelCase : Union[str, Any] = noop
# Constants are all uppercase, they go first.
_lowerCAmelCase : Any = [obj for obj in objects if key(_lowerCamelCase ).isupper()]
# Classes are not all uppercase but start with a capital, they go second.
_lowerCAmelCase : Union[str, Any] = [obj for obj in objects if key(_lowerCamelCase )[0].isupper() and not key(_lowerCamelCase ).isupper()]
# Functions begin with a lowercase, they go last.
_lowerCAmelCase : Optional[Any] = [obj for obj in objects if not key(_lowerCamelCase )[0].isupper()]
_lowerCAmelCase : List[str] = ignore_underscore(_lowerCamelCase )
return sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase ) + sorted(_lowerCamelCase , key=_lowerCamelCase )
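# Illustrative ordering (not in the original file): sort_objects(["zeta", "Alpha", "BETA"])
# returns ["BETA", "Alpha", "zeta"] -- uppercase constants first, then classes,
# then lowercase functions, each group sorted with underscores ignored.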
def _UpperCAmelCase ( _lowerCamelCase : str ) -> str:
    # This inner function sorts imports between [ ].
def _replace(_lowerCamelCase : Union[str, Any] ):
_lowerCAmelCase : Optional[Any] = match.groups()[0]
if "," not in imports:
return f'[{imports}]'
_lowerCAmelCase : List[str] = [part.strip().replace("""\"""" , """""" ) for part in imports.split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : int = keys[:-1]
return "[" + ", ".join([f'"{k}"' for k in sort_objects(_lowerCamelCase )] ) + "]"
_lowerCAmelCase : Optional[int] = import_statement.split("""\n""" )
if len(_lowerCamelCase ) > 3:
# Here we have to sort internal imports that are on several lines (one per name):
# key: [
# "object1",
# "object2",
# ...
# ]
# We may have to ignore one or two lines on each side.
_lowerCAmelCase : Dict = 2 if lines[1].strip() == """[""" else 1
_lowerCAmelCase : Tuple = [(i, _re_strip_line.search(_lowerCamelCase ).groups()[0]) for i, line in enumerate(lines[idx:-idx] )]
_lowerCAmelCase : Tuple = sort_objects(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] )
_lowerCAmelCase : Optional[Any] = [lines[x[0] + idx] for x in sorted_indices]
return "\n".join(lines[:idx] + sorted_lines + lines[-idx:] )
elif len(_lowerCamelCase ) == 3:
# Here we have to sort internal imports that are on one separate line:
# key: [
# "object1", "object2", ...
# ]
if _re_bracket_content.search(lines[1] ) is not None:
_lowerCAmelCase : str = _re_bracket_content.sub(_replace , lines[1] )
else:
_lowerCAmelCase : Tuple = [part.strip().replace("""\"""" , """""" ) for part in lines[1].split(""",""" )]
# We will have a final empty element if the line finished with a comma.
if len(keys[-1] ) == 0:
_lowerCAmelCase : Dict = keys[:-1]
_lowerCAmelCase : Optional[Any] = get_indent(lines[1] ) + """, """.join([f'"{k}"' for k in sort_objects(_lowerCamelCase )] )
return "\n".join(_lowerCamelCase )
else:
# Finally we have to deal with imports fitting on one line
_lowerCAmelCase : Dict = _re_bracket_content.sub(_replace , _lowerCamelCase )
return import_statement
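# One-line case (illustrative, not from the original file): the
# _re_bracket_content substitution rewrites
#   _import_structure["models"].extend(["Zebra", "alpha", "BETA"])
# into
#   _import_structure["models"].extend(["BETA", "Zebra", "alpha"])
# because constants sort before classes and classes before functions.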
def _UpperCAmelCase ( _lowerCamelCase : int , _lowerCamelCase : Union[str, Any]=True ) -> List[str]:
with open(_lowerCamelCase , """r""" ) as f:
_lowerCAmelCase : Optional[Any] = f.read()
if "_import_structure" not in code:
return
# Blocks of indent level 0
_lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(
_lowerCamelCase , start_prompt="""_import_structure = {""" , end_prompt="""if TYPE_CHECKING:""" )
# We ignore block 0 (everything until start_prompt) and the last block (everything after end_prompt).
for block_idx in range(1 , len(_lowerCamelCase ) - 1 ):
# Check if the block contains some `_import_structure`s thingy to sort.
_lowerCAmelCase : List[str] = main_blocks[block_idx]
_lowerCAmelCase : int = block.split("""\n""" )
# Get to the start of the imports.
_lowerCAmelCase : Any = 0
while line_idx < len(_lowerCamelCase ) and "_import_structure" not in block_lines[line_idx]:
# Skip dummy import blocks
if "import dummy" in block_lines[line_idx]:
_lowerCAmelCase : Optional[int] = len(_lowerCamelCase )
else:
line_idx += 1
if line_idx >= len(_lowerCamelCase ):
continue
# Ignore beginning and last line: they don't contain anything.
_lowerCAmelCase : Any = """\n""".join(block_lines[line_idx:-1] )
_lowerCAmelCase : Tuple = get_indent(block_lines[1] )
        # Split the internal block into blocks of indent level 1.
_lowerCAmelCase : Optional[Any] = split_code_in_indented_blocks(_lowerCamelCase , indent_level=_lowerCamelCase )
# We have two categories of import key: list or _import_structure[key].append/extend
_lowerCAmelCase : List[Any] = _re_direct_key if """_import_structure""" in block_lines[0] else _re_indirect_key
# Grab the keys, but there is a trap: some lines are empty or just comments.
_lowerCAmelCase : Tuple = [(pattern.search(_lowerCamelCase ).groups()[0] if pattern.search(_lowerCamelCase ) is not None else None) for b in internal_blocks]
# We only sort the lines with a key.
_lowerCAmelCase : List[str] = [(i, key) for i, key in enumerate(_lowerCamelCase ) if key is not None]
_lowerCAmelCase : List[str] = [x[0] for x in sorted(_lowerCamelCase , key=lambda _lowerCamelCase : x[1] )]
# We reorder the blocks by leaving empty lines/comments as they were and reorder the rest.
_lowerCAmelCase : List[Any] = 0
_lowerCAmelCase : List[str] = []
for i in range(len(_lowerCamelCase ) ):
if keys[i] is None:
reordered_blocks.append(internal_blocks[i] )
else:
_lowerCAmelCase : Any = sort_objects_in_import(internal_blocks[sorted_indices[count]] )
reordered_blocks.append(_lowerCamelCase )
count += 1
# And we put our main block back together with its first and last line.
_lowerCAmelCase : str = """\n""".join(block_lines[:line_idx] + reordered_blocks + [block_lines[-1]] )
if code != "\n".join(_lowerCamelCase ):
if check_only:
return True
else:
print(f'Overwriting {file}.' )
with open(_lowerCamelCase , """w""" ) as f:
f.write("""\n""".join(_lowerCamelCase ) )
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any]=True ) -> Any:
_lowerCAmelCase : List[Any] = []
for root, _, files in os.walk(_lowerCamelCase ):
if "__init__.py" in files:
_lowerCAmelCase : List[Any] = sort_imports(os.path.join(_lowerCamelCase , """__init__.py""" ) , check_only=_lowerCamelCase )
if result:
_lowerCAmelCase : str = [os.path.join(_lowerCamelCase , """__init__.py""" )]
if len(_lowerCamelCase ) > 0:
raise ValueError(f'Would overwrite {len(_lowerCamelCase )} files, run `make style`.' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
parser.add_argument("""--check_only""", action="""store_true""", help="""Whether to only check or fix style.""")
UpperCamelCase_ = parser.parse_args()
sort_imports_in_all_inits(check_only=args.check_only)
'''simple docstring'''
def _UpperCAmelCase ( _lowerCamelCase : int ) -> int:
    if not isinstance(_lowerCamelCase , int ) or number < 0:
raise ValueError("""Input must be a non-negative integer""" )
_lowerCAmelCase : int = 0
while number:
        # Clearing the lowest set bit each iteration (Brian Kernighan's trick)
        # means the loop runs once per set bit rather than once per bit
        # position, so it never needs all 32 iterations.
number &= number - 1
count += 1
return count
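# Worked example (illustrative, not in the original file): for number = 12
# (0b1100):
#   12 & 11 = 8 (0b1000)  -> count = 1
#    8 &  7 = 0           -> count = 2
# The loop runs twice, once per set bit, instead of once per bit position.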
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
UpperCamelCase_ = logging.get_logger(__name__)
class a_ :
def __init__( self , snake_case_ , snake_case_ ):
_lowerCAmelCase : List[str] = question_encoder
_lowerCAmelCase : Optional[Any] = generator
_lowerCAmelCase : Optional[Any] = self.question_encoder
def __UpperCamelCase ( self , snake_case_ ):
if os.path.isfile(snake_case_ ):
raise ValueError(f'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(snake_case_ , exist_ok=snake_case_ )
_lowerCAmelCase : Any = os.path.join(snake_case_ , """question_encoder_tokenizer""" )
_lowerCAmelCase : Tuple = os.path.join(snake_case_ , """generator_tokenizer""" )
self.question_encoder.save_pretrained(snake_case_ )
self.generator.save_pretrained(snake_case_ )
@classmethod
def __UpperCamelCase ( cls , snake_case_ , **snake_case_ ):
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
_lowerCAmelCase : Dict = kwargs.pop("""config""" , snake_case_ )
if config is None:
_lowerCAmelCase : List[Any] = RagConfig.from_pretrained(snake_case_ )
_lowerCAmelCase : int = AutoTokenizer.from_pretrained(
snake_case_ , config=config.question_encoder , subfolder="""question_encoder_tokenizer""" )
_lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained(
snake_case_ , config=config.generator , subfolder="""generator_tokenizer""" )
return cls(question_encoder=snake_case_ , generator=snake_case_ )
def __call__( self , *snake_case_ , **snake_case_ ):
return self.current_tokenizer(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.batch_decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self , *snake_case_ , **snake_case_ ):
return self.generator.decode(*snake_case_ , **snake_case_ )
def __UpperCamelCase ( self ):
_lowerCAmelCase : str = self.question_encoder
def __UpperCamelCase ( self ):
_lowerCAmelCase : Optional[Any] = self.generator
def __UpperCamelCase ( self , snake_case_ , snake_case_ = None , snake_case_ = None , snake_case_ = None , snake_case_ = "longest" , snake_case_ = None , snake_case_ = True , **snake_case_ , ):
warnings.warn(
"""`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the """
"""regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` """
"""context manager to prepare your targets. See the documentation of your specific tokenizer for more """
"""details""" , snake_case_ , )
if max_length is None:
_lowerCAmelCase : Any = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[Any] = self(
snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , max_length=snake_case_ , padding=snake_case_ , truncation=snake_case_ , **snake_case_ , )
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
_lowerCAmelCase : List[str] = self.current_tokenizer.model_max_length
_lowerCAmelCase : List[str] = self(
text_target=snake_case_ , add_special_tokens=snake_case_ , return_tensors=snake_case_ , padding=snake_case_ , max_length=snake_case_ , truncation=snake_case_ , **snake_case_ , )
_lowerCAmelCase : Dict = labels["""input_ids"""]
return model_inputs
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def _UpperCAmelCase ( _lowerCamelCase : List[Any] ) -> List[str]:
_lowerCAmelCase : List[str] = 3_84
if "tiny" in model_name:
_lowerCAmelCase : str = [3, 3, 9, 3]
_lowerCAmelCase : str = [96, 1_92, 3_84, 7_68]
if "small" in model_name:
_lowerCAmelCase : Optional[int] = [3, 3, 27, 3]
_lowerCAmelCase : str = [96, 1_92, 3_84, 7_68]
if "base" in model_name:
_lowerCAmelCase : Optional[Any] = [3, 3, 27, 3]
_lowerCAmelCase : int = [1_28, 2_56, 5_12, 10_24]
_lowerCAmelCase : Tuple = 5_12
if "large" in model_name:
_lowerCAmelCase : Dict = [3, 3, 27, 3]
_lowerCAmelCase : Optional[int] = [1_92, 3_84, 7_68, 15_36]
_lowerCAmelCase : Union[str, Any] = 7_68
if "xlarge" in model_name:
_lowerCAmelCase : int = [3, 3, 27, 3]
_lowerCAmelCase : Any = [2_56, 5_12, 10_24, 20_48]
_lowerCAmelCase : Tuple = 10_24
# set label information
_lowerCAmelCase : Union[str, Any] = 1_50
_lowerCAmelCase : Any = """huggingface/label-files"""
_lowerCAmelCase : Any = """ade20k-id2label.json"""
_lowerCAmelCase : Optional[int] = json.load(open(hf_hub_download(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) , """r""" ) )
_lowerCAmelCase : str = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase : Tuple = {v: k for k, v in idalabel.items()}
_lowerCAmelCase : Dict = ConvNextConfig(
depths=_lowerCamelCase , hidden_sizes=_lowerCamelCase , out_features=["""stage1""", """stage2""", """stage3""", """stage4"""] )
_lowerCAmelCase : List[Any] = UperNetConfig(
backbone_config=_lowerCamelCase , auxiliary_in_channels=_lowerCamelCase , num_labels=_lowerCamelCase , idalabel=_lowerCamelCase , labelaid=_lowerCamelCase , )
return config
def _UpperCAmelCase ( _lowerCamelCase : Tuple ) -> int:
_lowerCAmelCase : Tuple = []
# fmt: off
# stem
rename_keys.append(("""backbone.downsample_layers.0.0.weight""", """backbone.embeddings.patch_embeddings.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.0.bias""", """backbone.embeddings.patch_embeddings.bias""") )
rename_keys.append(("""backbone.downsample_layers.0.1.weight""", """backbone.embeddings.layernorm.weight""") )
rename_keys.append(("""backbone.downsample_layers.0.1.bias""", """backbone.embeddings.layernorm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.stages.{i}.{j}.gamma', f'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.weight', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.depthwise_conv.bias', f'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.weight', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.norm.bias', f'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv1.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.weight', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((f'backbone.stages.{i}.{j}.pointwise_conv2.bias', f'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((f'backbone.downsample_layers.{i}.0.weight', f'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.0.bias', f'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.weight', f'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((f'backbone.downsample_layers.{i}.1.bias', f'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((f'backbone.norm{i}.weight', f'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((f'backbone.norm{i}.bias', f'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
("""decode_head.conv_seg.weight""", """decode_head.classifier.weight"""),
("""decode_head.conv_seg.bias""", """decode_head.classifier.bias"""),
("""auxiliary_head.conv_seg.weight""", """auxiliary_head.classifier.weight"""),
("""auxiliary_head.conv_seg.bias""", """auxiliary_head.classifier.bias"""),
] )
# fmt: on
return rename_keys
def _UpperCAmelCase ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Optional[int] , _lowerCamelCase : List[str] ) -> str:
_lowerCAmelCase : Dict = dct.pop(_lowerCamelCase )
_lowerCAmelCase : Optional[int] = val
def _UpperCAmelCase ( _lowerCamelCase : Optional[int] , _lowerCamelCase : str , _lowerCamelCase : str ) -> Optional[Any]:
_lowerCAmelCase : str = {
"""upernet-convnext-tiny""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth""",
"""upernet-convnext-small""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth""",
"""upernet-convnext-base""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth""",
"""upernet-convnext-large""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth""",
"""upernet-convnext-xlarge""": """https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth""",
}
_lowerCAmelCase : int = model_name_to_url[model_name]
_lowerCAmelCase : Any = torch.hub.load_state_dict_from_url(_lowerCamelCase , map_location="""cpu""" )["""state_dict"""]
_lowerCAmelCase : Dict = get_upernet_config(_lowerCamelCase )
_lowerCAmelCase : List[str] = UperNetForSemanticSegmentation(_lowerCamelCase )
model.eval()
# replace "bn" => "batch_norm"
for key in state_dict.copy().keys():
_lowerCAmelCase : List[str] = state_dict.pop(_lowerCamelCase )
if "bn" in key:
_lowerCAmelCase : Tuple = key.replace("""bn""" , """batch_norm""" )
_lowerCAmelCase : List[str] = val
# rename keys
_lowerCAmelCase : List[Any] = create_rename_keys(_lowerCamelCase )
for src, dest in rename_keys:
rename_key(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
model.load_state_dict(_lowerCamelCase )
# verify on image
_lowerCAmelCase : Optional[int] = """https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg"""
_lowerCAmelCase : int = Image.open(requests.get(_lowerCamelCase , stream=_lowerCamelCase ).raw ).convert("""RGB""" )
_lowerCAmelCase : Optional[Any] = SegformerImageProcessor()
_lowerCAmelCase : Tuple = processor(_lowerCamelCase , return_tensors="""pt""" ).pixel_values
with torch.no_grad():
_lowerCAmelCase : List[Any] = model(_lowerCamelCase )
if model_name == "upernet-convnext-tiny":
_lowerCAmelCase : Tuple = torch.tensor(
[[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]] )
elif model_name == "upernet-convnext-small":
_lowerCAmelCase : Optional[Any] = torch.tensor(
[[-8.8236, -8.8236, -8.6771], [-8.8236, -8.8236, -8.6771], [-8.7638, -8.7638, -8.6240]] )
elif model_name == "upernet-convnext-base":
_lowerCAmelCase : Tuple = torch.tensor(
[[-8.8558, -8.8558, -8.6905], [-8.8558, -8.8558, -8.6905], [-8.7669, -8.7669, -8.6021]] )
elif model_name == "upernet-convnext-large":
_lowerCAmelCase : str = torch.tensor(
[[-8.6660, -8.6660, -8.6210], [-8.6660, -8.6660, -8.6210], [-8.6310, -8.6310, -8.5964]] )
elif model_name == "upernet-convnext-xlarge":
_lowerCAmelCase : str = torch.tensor(
[[-8.4980, -8.4980, -8.3977], [-8.4980, -8.4980, -8.3977], [-8.4379, -8.4379, -8.3412]] )
print("""Logits:""" , outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] , _lowerCamelCase , atol=1e-4 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCamelCase )
print(f'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(_lowerCamelCase )
if push_to_hub:
print(f'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(f'openmmlab/{model_name}' )
processor.push_to_hub(f'openmmlab/{model_name}' )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""upernet-convnext-tiny""",
type=str,
choices=[F'upernet-convnext-{size}' for size in ["""tiny""", """small""", """base""", """large""", """xlarge"""]],
help="""Name of the ConvNext UperNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
UpperCamelCase_ = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
'''simple docstring'''
# Algorithm for the pigeonhole sorting
def _UpperCAmelCase ( _lowerCamelCase : Union[str, Any] ) -> List[Any]:
_lowerCAmelCase : List[Any] = min(_lowerCamelCase ) # min() finds the minimum value
_lowerCAmelCase : Tuple = max(_lowerCamelCase ) # max() finds the maximum value
_lowerCAmelCase : int = max_val - min_val + 1 # size is difference of max and min values plus one
# list of pigeonholes of size equal to the variable size
_lowerCAmelCase : Dict = [0] * size
# Populate the pigeonholes.
for x in a:
        assert isinstance(_lowerCamelCase , int ), "integers only please"
holes[x - min_val] += 1
    # Putting the elements back into the array in sorted order.
_lowerCAmelCase : Any = 0
for count in range(_lowerCamelCase ):
while holes[count] > 0:
holes[count] -= 1
_lowerCAmelCase : Optional[int] = count + min_val
i += 1
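# Worked example (illustrative, not in the original file): for [8, 3, 2, 7, 4, 6, 8],
# min_val = 2, max_val = 8 and size = 7; the pigeonholes fill as
# [1, 1, 1, 0, 1, 1, 2] for the values 2..8, and emptying them in order writes
# back [2, 3, 4, 6, 7, 8, 8].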
def _UpperCAmelCase ( ) -> Optional[int]:
_lowerCAmelCase : Optional[int] = [8, 3, 2, 7, 4, 6, 8]
pigeonhole_sort(_lowerCamelCase )
print("""Sorted order is:""" , """ """.join(_lowerCamelCase ) )
if __name__ == "__main__":
main()
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_download, hf_hub_url
from PIL import Image
from transformers import DetaConfig, DetaForObjectDetection, DetaImageProcessor, SwinConfig
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
def _UpperCAmelCase ( _lowerCamelCase : int ) -> List[Any]:
_lowerCAmelCase : str = SwinConfig(
embed_dim=1_92 , depths=(2, 2, 18, 2) , num_heads=(6, 12, 24, 48) , window_size=12 , out_features=["""stage2""", """stage3""", """stage4"""] , )
_lowerCAmelCase : List[str] = DetaConfig(
backbone_config=_lowerCamelCase , num_queries=9_00 , encoder_ffn_dim=20_48 , decoder_ffn_dim=20_48 , num_feature_levels=5 , assign_first_stage=_lowerCamelCase , with_box_refine=_lowerCamelCase , two_stage=_lowerCamelCase , )
# set labels
_lowerCAmelCase : str = """huggingface/label-files"""
if "o365" in model_name:
_lowerCAmelCase : str = 3_66
_lowerCAmelCase : Any = """object365-id2label.json"""
else:
_lowerCAmelCase : str = 91
_lowerCAmelCase : Dict = """coco-detection-id2label.json"""
_lowerCAmelCase : int = num_labels
_lowerCAmelCase : Dict = json.load(open(cached_download(hf_hub_url(_lowerCamelCase , _lowerCamelCase , repo_type="""dataset""" ) ) , """r""" ) )
_lowerCAmelCase : List[Any] = {int(_lowerCamelCase ): v for k, v in idalabel.items()}
_lowerCAmelCase : str = idalabel
_lowerCAmelCase : Any = {v: k for k, v in idalabel.items()}
return config
def _UpperCAmelCase ( _lowerCamelCase : Any ) -> List[str]:
_lowerCAmelCase : Union[str, Any] = []
# stem
# fmt: off
rename_keys.append(("""backbone.0.body.patch_embed.proj.weight""", """model.backbone.model.embeddings.patch_embeddings.projection.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.proj.bias""", """model.backbone.model.embeddings.patch_embeddings.projection.bias""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.weight""", """model.backbone.model.embeddings.norm.weight""") )
rename_keys.append(("""backbone.0.body.patch_embed.norm.bias""", """model.backbone.model.embeddings.norm.bias""") )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_before.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_bias_table', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_bias_table') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.relative_position_index', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.self.relative_position_index') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.attn.proj.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.output.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.norm2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.layernorm_after.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc1.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.intermediate.dense.bias') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.weight', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.blocks.{j}.mlp.fc2.bias', f'model.backbone.model.encoder.layers.{i}.blocks.{j}.output.dense.bias') )
if i < 3:
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.reduction.weight', f'model.backbone.model.encoder.layers.{i}.downsample.reduction.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.weight', f'model.backbone.model.encoder.layers.{i}.downsample.norm.weight') )
rename_keys.append((f'backbone.0.body.layers.{i}.downsample.norm.bias', f'model.backbone.model.encoder.layers.{i}.downsample.norm.bias') )
rename_keys.append(("""backbone.0.body.norm1.weight""", """model.backbone.model.hidden_states_norms.stage2.weight""") )
rename_keys.append(("""backbone.0.body.norm1.bias""", """model.backbone.model.hidden_states_norms.stage2.bias""") )
rename_keys.append(("""backbone.0.body.norm2.weight""", """model.backbone.model.hidden_states_norms.stage3.weight""") )
rename_keys.append(("""backbone.0.body.norm2.bias""", """model.backbone.model.hidden_states_norms.stage3.bias""") )
rename_keys.append(("""backbone.0.body.norm3.weight""", """model.backbone.model.hidden_states_norms.stage4.weight""") )
rename_keys.append(("""backbone.0.body.norm3.bias""", """model.backbone.model.hidden_states_norms.stage4.bias""") )
# transformer encoder
for i in range(config.encoder_layers ):
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.weight', f'model.encoder.layers.{i}.self_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.sampling_offsets.bias', f'model.encoder.layers.{i}.self_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.weight', f'model.encoder.layers.{i}.self_attn.attention_weights.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.attention_weights.bias', f'model.encoder.layers.{i}.self_attn.attention_weights.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.weight', f'model.encoder.layers.{i}.self_attn.value_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.value_proj.bias', f'model.encoder.layers.{i}.self_attn.value_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.weight', f'model.encoder.layers.{i}.self_attn.output_proj.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.self_attn.output_proj.bias', f'model.encoder.layers.{i}.self_attn.output_proj.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.weight', f'model.encoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm1.bias', f'model.encoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.weight', f'model.encoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear1.bias', f'model.encoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.weight', f'model.encoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.linear2.bias', f'model.encoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.weight', f'model.encoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.encoder.layers.{i}.norm2.bias', f'model.encoder.layers.{i}.final_layer_norm.bias') )
# transformer decoder
for i in range(config.decoder_layers ):
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.weight', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.sampling_offsets.bias', f'model.decoder.layers.{i}.encoder_attn.sampling_offsets.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.weight', f'model.decoder.layers.{i}.encoder_attn.attention_weights.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.attention_weights.bias', f'model.decoder.layers.{i}.encoder_attn.attention_weights.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.weight', f'model.decoder.layers.{i}.encoder_attn.value_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.value_proj.bias', f'model.decoder.layers.{i}.encoder_attn.value_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.weight', f'model.decoder.layers.{i}.encoder_attn.output_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.cross_attn.output_proj.bias', f'model.decoder.layers.{i}.encoder_attn.output_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.weight', f'model.decoder.layers.{i}.encoder_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm1.bias', f'model.decoder.layers.{i}.encoder_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.weight', f'model.decoder.layers.{i}.self_attn.out_proj.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.self_attn.out_proj.bias', f'model.decoder.layers.{i}.self_attn.out_proj.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.weight', f'model.decoder.layers.{i}.self_attn_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm2.bias', f'model.decoder.layers.{i}.self_attn_layer_norm.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.weight', f'model.decoder.layers.{i}.fc1.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear1.bias', f'model.decoder.layers.{i}.fc1.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.weight', f'model.decoder.layers.{i}.fc2.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.linear2.bias', f'model.decoder.layers.{i}.fc2.bias') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.weight', f'model.decoder.layers.{i}.final_layer_norm.weight') )
rename_keys.append((f'transformer.decoder.layers.{i}.norm3.bias', f'model.decoder.layers.{i}.final_layer_norm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    # remove the old key and re-insert its value under the new key
    val = dct.pop(old)
    dct[new] = val
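# Hypothetical usage sketch (the dict contents are illustrative, the key pair comes from
# the rename table above):
# sd = {"backbone.0.body.norm1.weight": torch.ones(3)}
# rename_key(sd, "backbone.0.body.norm1.weight", "model.backbone.model.hidden_states_norms.stage2.weight")
# assert "backbone.0.body.norm1.weight" not in sd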
def read_in_swin_q_k_v(state_dict, backbone_config):
    num_features = [int(backbone_config.embed_dim * 2**i) for i in range(len(backbone_config.depths))]
    for i in range(len(backbone_config.depths)):
        dim = num_features[i]
        for j in range(backbone_config.depths[i]):
            # fmt: off
            # read in weights + bias of input projection layer (in original implementation, this is a single matrix + bias)
            in_proj_weight = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.weight')
            in_proj_bias = state_dict.pop(f'backbone.0.body.layers.{i}.blocks.{j}.attn.qkv.bias')
            # next, add query, keys and values (in that order) to the state dict
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.attention.query.weight'] = in_proj_weight[:dim, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.attention.query.bias'] = in_proj_bias[:dim]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.attention.key.weight'] = in_proj_weight[dim : dim * 2, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.attention.key.bias'] = in_proj_bias[dim : dim * 2]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.attention.value.weight'] = in_proj_weight[-dim:, :]
            state_dict[f'model.backbone.model.encoder.layers.{i}.blocks.{j}.attention.attention.value.bias'] = in_proj_bias[-dim:]
            # fmt: on
def read_in_decoder_q_k_v(state_dict, config):
    # transformer decoder self-attention layers
    hidden_size = config.d_model
    for i in range(config.decoder_layers):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_weight')
        in_proj_bias = state_dict.pop(f'transformer.decoder.layers.{i}.self_attn.in_proj_bias')
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.weight'] = in_proj_weight[:hidden_size, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.q_proj.bias'] = in_proj_bias[:hidden_size]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.weight'] = in_proj_weight[hidden_size : hidden_size * 2, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.k_proj.bias'] = in_proj_bias[hidden_size : hidden_size * 2]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.weight'] = in_proj_weight[-hidden_size:, :]
        state_dict[f'model.decoder.layers.{i}.self_attn.v_proj.bias'] = in_proj_bias[-hidden_size:]
def prepare_img():
    # a standard COCO validation image, used to sanity-check the converted model
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
    config = get_deta_config(model_name)
    # load original state dict
    if model_name == "deta-swin-large":
        checkpoint_path = hf_hub_download(repo_id="nielsr/deta-checkpoints", filename="adet_swin_ft.pth")
    elif model_name == "deta-swin-large-o365":
        checkpoint_path = hf_hub_download(repo_id="jozhang97/deta-swin-l-o365", filename="deta_swin_pt_o365.pth")
    else:
        raise ValueError(f'Model name {model_name} not supported')
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    # print the original state dict keys and shapes
    for name, param in state_dict.items():
        print(name, param.shape)
    # rename keys
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_swin_q_k_v(state_dict, config.backbone_config)
    read_in_decoder_q_k_v(state_dict, config)
    # fix some prefixes
    for key in state_dict.copy().keys():
        if "transformer.decoder.class_embed" in key or "transformer.decoder.bbox_embed" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer.decoder", "model.decoder")] = val
        if "input_proj" in key:
            val = state_dict.pop(key)
            state_dict["model." + key] = val
        if "level_embed" in key or "pos_trans" in key or "pix_trans" in key or "enc_output" in key:
            val = state_dict.pop(key)
            state_dict[key.replace("transformer", "model")] = val
    # finally, create HuggingFace model and load state dict
    model = DetaForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model.to(device)
    # load image processor
    processor = DetaImageProcessor(format="coco_detection")
    # verify our conversion on image
    img = prepare_img()
    encoding = processor(images=img, return_tensors="pt")
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values.to(device))
    # verify logits
    print("Logits:", outputs.logits[0, :3, :3])
    print("Boxes:", outputs.pred_boxes[0, :3, :3])
    if model_name == "deta-swin-large":
        expected_logits = torch.tensor(
            [[-7.6308, -2.8485, -5.3737], [-7.2037, -4.5505, -4.8027], [-7.2943, -4.2611, -4.6617]])
        expected_boxes = torch.tensor([[0.4987, 0.4969, 0.9999], [0.2549, 0.5498, 0.4805], [0.5498, 0.2757, 0.0569]])
    elif model_name == "deta-swin-large-o365":
        expected_logits = torch.tensor(
            [[-8.0122, -3.5720, -4.9717], [-8.1547, -3.6886, -4.6389], [-7.6610, -3.6194, -5.0134]])
        expected_boxes = torch.tensor([[0.2523, 0.5549, 0.4881], [0.7715, 0.4149, 0.4601], [0.5503, 0.2753, 0.0575]])
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits.to(device), atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes.to(device), atol=1e-4)
    print("Everything ok!")
    if pytorch_dump_folder_path:
        # Save model and processor
        logger.info(f'Saving PyTorch model and processor to {pytorch_dump_folder_path}...')
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        processor.save_pretrained(pytorch_dump_folder_path)
    # Push to hub
    if push_to_hub:
        print("Pushing model and processor to hub...")
        model.push_to_hub(f'jozhang97/{model_name}')
        processor.push_to_hub(f'jozhang97/{model_name}')
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_name""",
type=str,
default="""deta-swin-large""",
choices=["""deta-swin-large""", """deta-swin-large-o365"""],
help="""Name of the model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
help="""Path to the folder to output PyTorch model.""",
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
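# Example invocation (a sketch; the script filename is assumed, and the checkpoints
# above must be reachable on the Hub):
#   python convert_deta_swin_to_pytorch.py --model_name deta-swin-large \
#       --pytorch_dump_folder_path ./deta-swin-large --push_to_hub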
'''simple docstring'''
def solution(n: int = 1_00) -> int:
    # (n * (n + 1) // 2) ** 2 is the square of the sum of 1..n (equivalently, the sum of cubes)
    sum_cubes = (n * (n + 1) // 2) ** 2
    sum_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'{solution() = }')
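# Worked check (not in the original file): for n = 10 the square of the sum is
# 55**2 = 3025 and the sum of the squares is 385, so the difference is 2640.
assert solution(10) == 2_640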
'''simple docstring'''
import copy
import random
from transformers import CLIPTokenizer
class a_ (CLIPTokenizer ):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.token_map = {}

    def try_adding_tokens(self, placeholder_token, *args, **kwargs):
        num_added_tokens = super().add_tokens(placeholder_token, *args, **kwargs)
        if num_added_tokens == 0:
            raise ValueError(
                f'The tokenizer already contains the token {placeholder_token}. Please pass a different'
                ' `placeholder_token` that is not already in the tokenizer.')

    def add_placeholder_tokens(self, placeholder_token, *args, num_vec_per_token=1, **kwargs):
        output = []
        if num_vec_per_token == 1:
            self.try_adding_tokens(placeholder_token, *args, **kwargs)
            output.append(placeholder_token)
        else:
            output = []
            for i in range(num_vec_per_token):
                ith_token = placeholder_token + f'_{i}'
                self.try_adding_tokens(ith_token, *args, **kwargs)
                output.append(ith_token)
        # handle cases where there is a new placeholder token that contains the current placeholder token but is larger
        for token in self.token_map:
            if token in placeholder_token:
                raise ValueError(
                    f'The tokenizer already has placeholder token {token} that can get confused with'
                    f' {placeholder_token}; keep placeholder tokens independent.')
        self.token_map[placeholder_token] = output

    def replace_placeholder_tokens_in_text(self, text, vector_shuffle=False, prop_tokens_to_load=1.0):
        if isinstance(text, list):
            output = []
            for i in range(len(text)):
                output.append(self.replace_placeholder_tokens_in_text(text[i], vector_shuffle=vector_shuffle))
            return output
        for placeholder_token in self.token_map:
            if placeholder_token in text:
                tokens = self.token_map[placeholder_token]
                tokens = tokens[: 1 + int(len(tokens) * prop_tokens_to_load)]
                if vector_shuffle:
                    tokens = copy.copy(tokens)
                    random.shuffle(tokens)
                text = text.replace(placeholder_token, ' '.join(tokens))
        return text

    def __call__(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().__call__(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args,
            **kwargs,
        )

    def encode(self, text, *args, vector_shuffle=False, prop_tokens_to_load=1.0, **kwargs):
        return super().encode(
            self.replace_placeholder_tokens_in_text(
                text, vector_shuffle=vector_shuffle, prop_tokens_to_load=prop_tokens_to_load),
            *args,
            **kwargs,
        )
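# Minimal usage sketch (the checkpoint id and placeholder string are illustrative,
# not from the original file):
# tokenizer = a_.from_pretrained("openai/clip-vit-base-patch32")
# tokenizer.add_placeholder_tokens("<my-concept>", num_vec_per_token=4)
# ids = tokenizer("a photo of <my-concept>", vector_shuffle=True)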
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"""configuration_focalnet""": ["""FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FocalNetConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_focalnet"""] = [
"""FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FocalNetForImageClassification""",
"""FocalNetForMaskedImageModeling""",
"""FocalNetBackbone""",
"""FocalNetModel""",
"""FocalNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
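# Effect of the lazy-module pattern above (a sketch, assuming this file is the focalnet
# subpackage __init__): attribute access triggers the real import, so the config class
# stays cheap to import while the model class pulls in torch on first use.
# from transformers.models.focalnet import FocalNetConfig  # lightweight
# from transformers.models.focalnet import FocalNetModel   # imports modeling_focalnet lazily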
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        # sift the element at `start` down to restore the min-heap property
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp_position = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_position
            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start]))
            self.set_position(positions[start], temp)
            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        # bubble a decreased key at `index` up towards the root
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("""Enter number of edges: """).strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
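# Worked example (hypothetical stdin, not part of the original file): entering 3 edges
#   0 1 1 / 0 2 3 / 1 2 1
# yields [(0, 1), (1, 2)] -- the minimum spanning tree keeps both weight-1 edges.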
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available, is_vision_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {"""configuration_dpt""": ["""DPT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DPTConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_dpt"""] = ["""DPTFeatureExtractor"""]
    _import_structure["""image_processing_dpt"""] = ["""DPTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_dpt"""] = [
"""DPT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DPTForDepthEstimation""",
"""DPTForSemanticSegmentation""",
"""DPTModel""",
"""DPTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dpt import DPT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPTConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_dpt import DPTFeatureExtractor
from .image_processing_dpt import DPTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dpt import (
DPT_PRETRAINED_MODEL_ARCHIVE_LIST,
DPTForDepthEstimation,
DPTForSemanticSegmentation,
DPTModel,
DPTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class a_ (unittest.TestCase ):
    def get_tokenizer(self, mname):
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname):
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model
@parameterized.expand(
[
["""en-ru""", 26.0],
["""ru-en""", 22.0],
["""en-de""", 22.0],
["""de-en""", 29.0],
] )
@slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f'facebook/wmt19-{pair}'
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)
        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]
        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids, num_beams=8)
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
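# Illustrative way to run just this slow test (the file path and test id are assumptions,
# not from the original file; @slow tests are gated behind RUN_SLOW):
#   RUN_SLOW=1 pytest tests/fsmt/test_fsmt_bleu_score.py -k "en_ru"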
'''simple docstring'''
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    # element-wise rectified linear unit: max(0, x)
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
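# Extra sanity check (not in the original file): negative entries clamp to zero.
assert relu([-2.5, 1.5]).tolist() == [0.0, 1.5]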
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
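# Quick usage sketch (illustrative, not from the original file): default_data_collator
# stacks equal-length feature dicts into batched tensors, collecting "label" under "labels".
# from transformers.data import default_data_collator
# batch = default_data_collator([{"input_ids": [1, 2], "label": 0},
#                                {"input_ids": [3, 4], "label": 1}])
# batch["input_ids"].shape  # -> torch.Size([2, 2])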
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_donut import DonutImageProcessor
UpperCamelCase_ = logging.get_logger(__name__)
class DonutFeatureExtractor(DonutImageProcessor ):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            """The class DonutFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DonutImageProcessor instead.""",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
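# Instantiating the shim (illustrative) emits the FutureWarning and otherwise behaves
# exactly like DonutImageProcessor:
# extractor = DonutFeatureExtractor()  # FutureWarning: ... use DonutImageProcessor instead.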
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_layoutlmv2""": ["""LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LayoutLMv2Config"""],
"""processing_layoutlmv2""": ["""LayoutLMv2Processor"""],
"""tokenization_layoutlmv2""": ["""LayoutLMv2Tokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_layoutlmv2_fast"""] = ["""LayoutLMv2TokenizerFast"""]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_layoutlmv2"""] = ["""LayoutLMv2FeatureExtractor"""]
    _import_structure["""image_processing_layoutlmv2"""] = ["""LayoutLMv2ImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_layoutlmv2"""] = [
"""LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LayoutLMv2ForQuestionAnswering""",
"""LayoutLMv2ForSequenceClassification""",
"""LayoutLMv2ForTokenClassification""",
"""LayoutLMv2Layer""",
"""LayoutLMv2Model""",
"""LayoutLMv2PreTrainedModel""",
]
if TYPE_CHECKING:
    from .configuration_layoutlmv2 import LAYOUTLMV2_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMv2Config
    from .processing_layoutlmv2 import LayoutLMv2Processor
    from .tokenization_layoutlmv2 import LayoutLMv2Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv2_fast import LayoutLMv2TokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv2 import LayoutLMv2FeatureExtractor, LayoutLMv2ImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv2 import (
            LAYOUTLMV2_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv2ForQuestionAnswering,
            LayoutLMv2ForSequenceClassification,
            LayoutLMv2ForTokenClassification,
            LayoutLMv2Layer,
            LayoutLMv2Model,
            LayoutLMv2PreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a_ (SchedulerCommonTest ):
    scheduler_classes = (DPMSolverSDEScheduler,)
    num_inference_steps = 1_0
    def get_scheduler_config(self, **kwargs):
        config = {
            """num_train_timesteps""": 1_1_0_0,
            """beta_start""": 0.0001,
            """beta_end""": 0.02,
            """beta_schedule""": """linear""",
            """noise_sampler_seed""": 0,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1_0, 5_0, 1_0_0, 1_0_0_0]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_0001, 0.0001, 0.001], [0.0002, 0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "scaled_linear"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.47_8210_4492_1875) < 1E-2
            assert abs(result_mean.item() - 0.2178_7059_6456_5277) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3521_1181_6406) < 1E-2
            assert abs(result_mean.item() - 0.2_2342_9068_9229_9652) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1E-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1E-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="""v_prediction""")
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps)
        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for i, t in enumerate(scheduler.timesteps):
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 124.77_1492_0043_9453) < 1E-2
            assert abs(result_mean.item() - 0.1_6226_2890_1481_6284) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 128.1_6633_6059_5703) < 1E-2
            assert abs(result_mean.item() - 0.1_6688_3260_0116_7297) < 1E-3
        else:
            assert abs(result_sum.item() - 119.8_4875_4882_8125) < 1E-2
            assert abs(result_mean.item() - 0.1560_5306_6253_6621) < 1E-3
    def test_full_loop_device(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 167.46_9573_9746_0938) < 1E-2
            assert abs(result_mean.item() - 0.2_1805_9346_0798_2635) < 1E-3
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 171.59_3536_3769_5312) < 1E-2
            assert abs(result_mean.item() - 0.2_2342_9083_8241_5771) < 1E-3
        else:
            assert abs(result_sum.item() - 162.52_3834_2285_1562) < 1E-2
            assert abs(result_mean.item() - 0.211_6195_7085_1326) < 1E-3
    def test_full_loop_device_karras_sigmas(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config, use_karras_sigmas=True)
        scheduler.set_timesteps(self.num_inference_steps, device=torch_device)
        model = self.dummy_model()
        sample = self.dummy_sample_deter.to(torch_device) * scheduler.init_noise_sigma
        sample = sample.to(torch_device)
        for t in scheduler.timesteps:
            sample = scheduler.scale_model_input(sample, t)
            model_output = model(sample, t)
            output = scheduler.step(model_output, t, sample)
            sample = output.prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        if torch_device in ["mps"]:
            assert abs(result_sum.item() - 176.66_9741_3574_2188) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1E-2
        elif torch_device in ["cuda"]:
            assert abs(result_sum.item() - 177.63_6535_6445_3125) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1E-2
        else:
            assert abs(result_sum.item() - 170.3_1352_2338_8672) < 1E-2
            assert abs(result_mean.item() - 0.2_3003_8727_3098_1811) < 1E-2
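# Usage sketch outside the test suite (the pipeline id is illustrative, not from the
# original file): swap the scheduler into a diffusers pipeline via from_config.
# from diffusers import StableDiffusionPipeline, DPMSolverSDEScheduler
# pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
# pipe.scheduler = DPMSolverSDEScheduler.from_config(pipe.scheduler.config)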