"""simple docstring"""
from collections import defaultdict
from pathlib import Path
import pandas as pd
from rouge_cli import calculate_rouge_path
from utils import calculate_rouge
__UpperCAmelCase : Any = [
'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of the'
' final seconds on board Flight 9525. The Germanwings co-pilot says he had a "previous episode of severe'
' depression\" German airline confirms it knew of Andreas Lubitz\'s depression years before he took control.',
'The Palestinian Authority officially becomes the 123rd member of the International Criminal Court. The formal'
' accession was marked with a ceremony at The Hague, in the Netherlands. The Palestinians signed the ICC\'s'
' founding Rome Statute in January. Israel and the United States opposed the Palestinians\' efforts to join the'
' body.',
'Amnesty International releases its annual report on the death penalty. The report catalogs the use of'
' state-sanctioned killing as a punitive measure across the globe. At least 607 people were executed around the'
' world in 2014, compared to 778 in 2013. The U.S. remains one of the worst offenders for imposing capital'
' punishment.',
]
TGT = [
'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .'
' Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz'
' had informed his Lufthansa training school of an episode of severe depression, airline says .',
'Membership gives the ICC jurisdiction over alleged crimes committed in Palestinian territories since last June .'
' Israel and the United States opposed the move, which could open the door to war crimes investigations against'
' Israelis .',
'Amnesty\'s annual death penalty report catalogs encouraging signs, but setbacks in numbers of those sentenced to'
' death . Organization claims that governments around the world are using the threat of terrorism to advance'
' executions . The number of executions worldwide has gone down by almost 22% compared with 2013, but death'
' sentences up by 28% .',
]
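
# The fixtures above pair hypothesis summaries (PRED) with CNN/DailyMail-style
# reference summaries (TGT); the tests below score PRED against TGT with the
# local `calculate_rouge` helper.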

def test_disaggregated_scores_are_deterministic():
    no_aggregation = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2", "rougeL"])
    assert isinstance(no_aggregation, defaultdict)
    no_aggregation_just_r2 = calculate_rouge(PRED, TGT, bootstrap_aggregation=False, rouge_keys=["rouge2"])
    assert (
        pd.DataFrame(no_aggregation["rouge2"]).fmeasure.mean()
        == pd.DataFrame(no_aggregation_just_r2["rouge2"]).fmeasure.mean()
    )

def test_newline_cnn_improvement():
    k = "rougeLsum"
    score = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=[k])[k]
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=[k])[k]
    assert score > score_no_sep
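
# rougeLsum is computed sentence-by-sentence, so joining sentences with "\n"
# (newline_sep=True) exposes sentence boundaries to the scorer; on these
# multi-sentence summaries that should strictly raise the score, as asserted above.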

def test_newline_irrelevant_for_other_metrics():
    k = ["rouge1", "rouge2", "rougeL"]
    score_sep = calculate_rouge(PRED, TGT, newline_sep=True, rouge_keys=k)
    score_no_sep = calculate_rouge(PRED, TGT, newline_sep=False, rouge_keys=k)
    assert score_sep == score_no_sep

def test_single_sent_scores_dont_depend_on_newline_sep():
    pred = [
        "Her older sister, Margot Frank, died in 1945, a month earlier than previously thought.",
        'Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports .',
    ]
    tgt = [
        "Margot Frank, died in 1945, a month earlier than previously thought.",
        'Prosecutor: "No videos were used in the crash investigation" German papers say they saw a cell phone video of'
        " the final seconds on board Flight 9525.",
    ]
    assert calculate_rouge(pred, tgt, newline_sep=True) == calculate_rouge(pred, tgt, newline_sep=False)

def test_pegasus_newline():
    pred = [
        '''" "a person who has such a video needs to immediately give it to the investigators," prosecutor says .<n> "it is a very disturbing scene," editor-in-chief of bild online tells "erin burnett: outfront" '''
    ]
    tgt = [
        ''' Marseille prosecutor says "so far no videos were used in the crash investigation" despite media reports . Journalists at Bild and Paris Match are "very confident" the video clip is real, an editor says . Andreas Lubitz had informed his Lufthansa training school of an episode of severe depression, airline says .'''
    ]

    prev_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"], newline_sep=False)["rougeLsum"]
    new_score = calculate_rouge(pred, tgt, rouge_keys=["rougeLsum"])["rougeLsum"]
    assert new_score > prev_score

def test_rouge_cli():
    data_dir = Path("examples/seq2seq/test_data/wmt_en_ro")
    metrics = calculate_rouge_path(data_dir.joinpath("test.source"), data_dir.joinpath("test.target"))
    assert isinstance(metrics, dict)
    metrics_default_dict = calculate_rouge_path(
        data_dir.joinpath("test.source"), data_dir.joinpath("test.target"), bootstrap_aggregation=False
    )
    assert isinstance(metrics_default_dict, defaultdict)
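
# Note on the isinstance checks: with the default bootstrap_aggregation=True the
# helper is expected to return a plain dict of aggregated scores, while
# bootstrap_aggregation=False yields a defaultdict of per-example scores.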
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")

if is_torch_available():
    from transformers.models.mbart.modeling_mbart import shift_tokens_right

EN_CODE = 250_004
RO_CODE = 250_020

@require_sentencepiece
@require_tokenizers
class MBart50TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBart50Tokenizer
    rust_tokenizer_class = MBart50TokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_054)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_054)

    def test_full_tokenizer(self):
        tokenizer = MBart50Tokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])

        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens),
            [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
        )

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."],
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids,
            [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ],
        )

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."],
        )
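
    # Note: fairseq_offset (1 for this tokenizer) shifts raw SentencePiece ids to
    # make room for the fairseq special tokens (<s>, <pad>, </s>, <unk>) placed at
    # the front of the vocabulary, which is why it is added to the expected ids above.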

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]}  # noqa: E501
        # fmt: on

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="facebook/mbart-large-50",
            revision="d3913889c59cd5c9e456b269c376325eabad57e2",
        )

    def test_save_pretrained(self):
        if not self.test_slow_tokenizer:
            # as we don't have a slow version, we can't compare the outputs between slow and fast versions
            return

        self.tokenizers_list[0] = (self.rust_tokenizer_class, "hf-internal-testing/tiny-random-mbart50", {})
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files + the tokenizer.json file for the fast one
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
                tokenizer_r_files = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))
                # self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
                # self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=True
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=True)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saves with the same files
                self.assertSequenceEqual(tokenizer_r_files, tokenizer_p_files)

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

                # Save tokenizer rust, legacy_format=False
                tmpdirname2 = tempfile.mkdtemp()

                tokenizer_r_files = tokenizer_r.save_pretrained(tmpdirname2, legacy_format=False)
                tokenizer_p_files = tokenizer_p.save_pretrained(tmpdirname2)

                # Checks it saved the tokenizer.json file
                self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))

                # Checks everything loads correctly in the same way
                tokenizer_rp = tokenizer_r.from_pretrained(tmpdirname2)
                tokenizer_pp = tokenizer_p.from_pretrained(tmpdirname2)

                # Check special tokens are set accordingly on Rust and Python
                for key in tokenizer_pp.special_tokens_map:
                    self.assertTrue(hasattr(tokenizer_rp, key))

                shutil.rmtree(tmpdirname2)

@require_torch
@require_sentencepiece
@require_tokenizers
class MBart50OneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.",
    ]
    tgt_text = [
        "Şeful ONU declară că nu există o soluţie militară în Siria",
        "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"
        " pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"
        " face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
    ]
    expected_src_tokens = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]

    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO"
        )
        cls.pad_token_id = 1
        return cls

    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250_001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250_004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250_020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250_038)

    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)

    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)

    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
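
    # With max_length=10 and truncation=True, the language-code prefix (EN_CODE)
    # and the EOS suffix (id 2) are kept, so only 8 content tokens survive.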

    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250_053, 250_001])

    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBart50Tokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)

    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]

    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text,
            text_target=self.tgt_text,
            padding=True,
            truncation=True,
            max_length=len(self.expected_src_tokens),
            return_tensors="pt",
        )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)

        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])

    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt"
        )
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)

        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)

    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR"
        )

        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250_004, 62, 3_034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250_001,
            },
        )
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = "cuda" if torch.cuda.is_available() else "cpu"

def split_text(text: str, n=100, character=" ") -> List[str]:
    """Split the text every ``n``-th occurrence of ``character``"""
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]
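
# Worked example: split_text("a b c d e", n=2) -> ["a b", "c d", "e"], i.e.
# chunks of n whitespace-separated words, so full articles become ~100-word passages.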

def split_documents(documents: dict) -> dict:
    """Split documents into passages"""
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}

def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute the DPR embeddings of document passages"""
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}

def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
):
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage

    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )

    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
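
# Sizing note for the HNSW index above: d must match the DPR embedding width
# (768 for the base encoders) and m trades index size/build time for recall;
# METRIC_INNER_PRODUCT mirrors DPR's dot-product retrieval scoring.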

@dataclass
class RagExampleArguments:
    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )


@dataclass
class ProcessingArguments:
    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )


@dataclass
class IndexHnswArguments:
    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
__UpperCAmelCase : Any = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
__UpperCAmelCase : Any = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
__UpperCAmelCase : str = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args)
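
# Hedged usage sketch (the script filename and csv path are illustrative, not
# from the source; HfArgumentParser derives the flags from the dataclass fields):
#   python use_own_knowledge_dataset.py --csv_path my_kb.csv --output_dir out/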
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)

class SequenceFeatureExtractor(FeatureExtractionMixin):
    """General feature extraction class for speech-recognition style sequence inputs."""

    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value

        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)

        super().__init__(**kwargs)

    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
        # If we have a list of dicts, let's convert it in a dict of lists
        # We do this to allow using this method as a collate_fn function in PyTorch Dataloader
        if isinstance(processed_features, (list, tuple)) and isinstance(processed_features[0], (dict, BatchFeature)):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }

        # The model's main input name, usually `input_values`, has to be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                "You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
                f" to this method that includes {self.model_input_names[0]}, but you provided"
                f" {list(processed_features.keys())}"
            )

        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )

        if len(required_input) == 0:
            if return_attention_mask:
                processed_features["attention_mask"] = []
            return processed_features

        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element, (list, tuple)):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index]) == 0:
                index += 1
            if index < len(required_input):
                first_element = required_input[index][0]

        if return_tensors is None:
            if is_tf_tensor(first_element):
                return_tensors = "tf"
            elif is_torch_tensor(first_element):
                return_tensors = "pt"
            elif isinstance(first_element, (int, float, list, tuple, np.ndarray)):
                return_tensors = "np"
            else:
                raise ValueError(
                    f"type of {first_element} unknown: {type(first_element)}. "
                    "Should be one of a python, numpy, pytorch or tensorflow object."
                )

        for key, value in processed_features.items():
            if isinstance(value[0], (int, float)):
                processed_features[key] = to_numpy(value)
            else:
                processed_features[key] = [to_numpy(v) for v in value]

        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding, max_length=max_length)

        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input)
        if not all(len(v) == batch_size for v in processed_features.values()):
            raise ValueError("Some items in the output dictionary have a different batch size than others.")

        truncated_inputs = []
        for i in range(batch_size):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs,
                max_length=max_length,
                pad_to_multiple_of=pad_to_multiple_of,
                truncation=truncation,
            )
            truncated_inputs.append(inputs_slice)

        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]]) for input_slice in truncated_inputs)
            padding_strategy = PaddingStrategy.MAX_LENGTH

        batch_outputs = {}
        for i in range(batch_size):
            # padding
            outputs = self._pad(
                truncated_inputs[i],
                max_length=max_length,
                padding_strategy=padding_strategy,
                pad_to_multiple_of=pad_to_multiple_of,
                return_attention_mask=return_attention_mask,
            )

            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64):
                    value = value.astype(np.float32)
                batch_outputs[key].append(value)

        return BatchFeature(batch_outputs, tensor_type=return_tensors)

    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ):
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
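
    # Note: the attention mask is extended with np.pad's default constant 0, so
    # padded positions are automatically masked out on whichever side
    # self.padding_side selects.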

    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features

    def _get_padding_strategies(self, padding=False, max_length=None):
        # Get padding strategy
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)

try:
    if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
    from .pipeline_unclip import UnCLIPPipeline
    from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
    from .text_proj import UnCLIPTextProjModel
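
# Importing the dummy objects in the except branch keeps the module importable
# when torch or a recent-enough transformers is missing; the dummy classes only
# raise a helpful error when actually instantiated.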
from __future__ import annotations

import numpy as np


def relu(vector: list[float]) -> np.ndarray:
    """Apply the ReLU (rectified linear unit) activation elementwise: max(0, x)."""
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)

absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()

# Required parameters
# Required parameters
parser.add_argument(
"--onnx_model_path",
default=None,
type=str,
required=True,
help="Path to ONNX model: ",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model checkpoints and predictions will be written.",
)
# Other parameters
parser.add_argument(
"--tokenizer_name",
default="",
type=str,
required=True,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--version_2_with_negative",
action="store_true",
help="If true, the SQuAD examples contain some that do not have an answer.",
)
parser.add_argument(
"--null_score_diff_threshold",
type=float,
default=0.0,
help="If null_score - best_non_null is greater than the threshold predict null.",
)
parser.add_argument(
"--max_seq_length",
default=384,
type=int,
help=(
"The maximum total input sequence length after WordPiece tokenization. Sequences "
"longer than this will be truncated, and sequences shorter than this will be padded."
),
)
parser.add_argument(
"--doc_stride",
default=128,
type=int,
help="When splitting up a long document into chunks, how much stride to take between chunks.",
)
parser.add_argument("--per_device_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation.")
parser.add_argument(
"--n_best_size",
default=20,
type=int,
help="The total number of n-best predictions to generate in the nbest_predictions.json output file.",
)
parser.add_argument(
"--max_answer_length",
default=30,
type=int,
help=(
"The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
),
)
parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
parser.add_argument(
"--dataset_name",
type=str,
default=None,
required=True,
help="The name of the dataset to use (via the datasets library).",
)
parser.add_argument(
"--dataset_config_name",
type=str,
default=None,
help="The configuration name of the dataset to use (via the datasets library).",
)
parser.add_argument(
    "--preprocessing_num_workers", type=int, default=4, help="Number of worker processes to use for preprocessing."
)
parser.add_argument("--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets")
parser.add_argument(
"--fp16",
action="store_true",
help="Whether to use 16-bit (mixed) precision instead of 32-bit",
)
parser.add_argument(
"--int8",
action="store_true",
help="Whether to use INT8",
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )
logger.info("Training/evaluation parameters %s", args)
args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"

# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())

def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    return outputs, infer_time
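
# Note: h_output0/h_output1 are pagelocked host buffers that receive the start
# and end logits; the timing above brackets only the GPU-side work (kernel plus
# async copies) because stream.synchronize() returns once they all finish.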
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"

if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f" model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)

def prepare_validation_features(examples):
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples

eval_examples = raw_datasets["validation"]

# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="eval") -> Union[str, Any]:
__snake_case: Optional[Any] = postprocess_qa_predictions(
examples=SCREAMING_SNAKE_CASE__ , features=SCREAMING_SNAKE_CASE__ , predictions=SCREAMING_SNAKE_CASE__ , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=SCREAMING_SNAKE_CASE__ , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
__snake_case: Optional[int] = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
__snake_case: Optional[int] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
__snake_case: Optional[int] = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=SCREAMING_SNAKE_CASE__ , label_ids=SCREAMING_SNAKE_CASE__)

metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info("Loading ONNX model %s for evaluation", args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def A__ ( SCREAMING_SNAKE_CASE__) -> int:
return trt.volume(engine.get_binding_shape(SCREAMING_SNAKE_CASE__)) * engine.get_binding_dtype(SCREAMING_SNAKE_CASE__).itemsize
# Allocate device memory for inputs and outputs.
__UpperCAmelCase : Dict = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__UpperCAmelCase : Dict = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__UpperCAmelCase : int = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__UpperCAmelCase : List[str] = cuda.mem_alloc(h_outputa.nbytes)
__UpperCAmelCase : Optional[Any] = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__UpperCAmelCase : Optional[Any] = cuda.Stream()

    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(
            batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream
        )
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"Evaluation metrics: {eval_metric}")
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)

        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])

        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)

    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()

        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)

        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)

    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1_024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)

        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)

        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1_024)

        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})

    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"

        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)

            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(new_config.foo, "bar")

        generation_config_from_model = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config_from_model, "foo")  # no new kwargs should be initialized if from config

    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)

        config = GenerationConfig(
            do_sample=True,
            temperature=0.7,
            length_penalty=1.0,
            bad_words_ids=[[1, 2, 3], [4, 5]],
        )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)

        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)

            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)

        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value

@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-generation-config")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-generation-config-org")
        except HTTPError:
            pass
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Optional[int] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__snake_case: str = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""test-generation-config""" , push_to_hub=A , use_auth_token=self._token )
__snake_case: Optional[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__snake_case: int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=A , use_auth_token=self._token )
__snake_case: Optional[int] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
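# A minimal usage sketch of what these tests exercise (the "gpt2" checkpoint is an
# illustrative choice, not something the tests mandate): a GenerationConfig built
# in code can be passed to `generate` to override the model's default settings.
if __name__ == "__main__":
    from transformers import AutoModelForCausalLM, AutoTokenizer

    tok = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained("gpt2")
    gen_config = GenerationConfig(do_sample=True, temperature=0.7, max_new_tokens=20)
    output = model.generate(**tok("Hello there,", return_tensors="pt"), generation_config=gen_config)
    print(tok.decode(output[0]))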
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive modular exponentiation by squaring."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value


def solution(base: int = 1777, height: int = 1855, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation of `base` by `height`."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 10**digits)
    return result


if __name__ == "__main__":
    print(f"{solution() = }")
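# Cross-check sketch: Python's built-in three-argument pow() also performs modular
# exponentiation, so for exponents > 1 it should agree with _modexpt (the exponent
# values below are arbitrary illustrations, not part of the problem statement).
if __name__ == "__main__":
    for exp in (2, 10, 1855):
        assert _modexpt(1777, exp, 10**8) == pow(1777, exp, 10**8)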
import argparse
import json
import os
import pickle
import shutil

import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
    BertConfig,
    BertForMaskedLM,
    BertTokenizer,
    DistilBertConfig,
    DistilBertForMaskedLM,
    DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
    RobertaConfig,
    RobertaForMaskedLM,
    RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed


MODEL_CLASSES = {
    "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    "bert": (BertConfig, BertForMaskedLM, BertTokenizer),
    "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
    """Verify that the hyperparameters passed on the command line are consistent with each other."""
    assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
    assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
    if args.mlm:
        assert os.path.isfile(args.token_counts)
        assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
    else:
        assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])

    assert args.teacher_type == args.student_type or (
        args.student_type == "distilbert" and args.teacher_type == "bert"
    )
    assert os.path.isfile(args.student_config)
    if args.student_pretrained_weights is not None:
        assert os.path.isfile(args.student_pretrained_weights)

    if args.freeze_token_type_embds:
        assert args.student_type in ["roberta"]

    assert args.alpha_ce >= 0.0
    assert args.alpha_mlm >= 0.0
    assert args.alpha_clm >= 0.0
    assert args.alpha_mse >= 0.0
    assert args.alpha_cos >= 0.0
    assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
    sanity_checks(args)

    # ARGS #
    init_gpu_params(args)
    set_seed(args)
    if args.is_master:
        if os.path.exists(args.dump_path):
            if not args.force:
                raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
            else:
                shutil.rmtree(args.dump_path)

        if not os.path.exists(args.dump_path):
            os.makedirs(args.dump_path)
        logger.info(f"Experiment will be dumped and logged in {args.dump_path}")

        # SAVE PARAMS #
        logger.info(f"Param: {args}")
        with open(os.path.join(args.dump_path, "parameters.json"), "w") as f:
            json.dump(vars(args), f, indent=4)
        git_log(args.dump_path)

    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]

    # DATA LOADER #
    logger.info(f"Loading data from {args.data_file}")
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
    logger.info("Data loader created.")

    # STUDENT #
    logger.info(f"Loading student config from {args.student_config}")
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)

    if args.n_gpu > 0:
        student.to(f"cuda:{args.local_rank}")
    logger.info("Student loaded.")

    # TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
    if args.n_gpu > 0:
        teacher.to(f"cuda:{args.local_rank}")
    logger.info(f"Teacher loaded from {args.teacher_name}.")

    # FREEZING #
    if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)

    # SANITY CHECKS #
    assert student.config.vocab_size == teacher.config.vocab_size
    assert student.config.hidden_size == teacher.config.hidden_size
    assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
    if args.mlm:
        assert token_probs.size(0) == stu_architecture_config.vocab_size

    # DISTILLER #
    torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
    distiller.train()
    logger.info("Let's go get some drinks.")


if __name__ == "__main__":
    main()
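# Illustrative launch command (file names and paths are placeholders, not shipped
# with this script); note that with `--mlm`, the sanity checks above additionally
# require `--alpha_clm 0.0`, since alpha_clm defaults to 0.5:
#
#   python train.py \
#     --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
#     --teacher_type bert --teacher_name bert-base-uncased \
#     --mlm --alpha_mlm 0.5 --alpha_clm 0.0 \
#     --dump_path serialization_dir/my_distillation \
#     --data_file data/binarized_text.pickle --token_counts data/token_counts.pickle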
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize("""path""" , ["""paws""", """csv"""])
def test_inspect_dataset(path, tmp_path):
    inspect_dataset(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.filterwarnings("""ignore:inspect_metric is deprecated:FutureWarning""")
@pytest.mark.filterwarnings("""ignore:metric_module_factory is deprecated:FutureWarning""")
@pytest.mark.parametrize("""path""" , ["""accuracy"""])
def test_inspect_metric(path, tmp_path):
    inspect_metric(path, tmp_path)
    script_name = path + ".py"
    assert script_name in os.listdir(tmp_path)
    assert "__pycache__" not in os.listdir(tmp_path)
@pytest.mark.parametrize(
"""path, config_name, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_config_info(path, config_name, expected_splits):
    info = get_dataset_config_info(path, config_name=config_name)
    assert info.config_name == config_name
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_config_info_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_config_info(path, config_name=config_name)
@pytest.mark.parametrize(
"""path, expected""" , [
("""squad""", """plain_text"""),
("""acronym_identification""", """default"""),
("""lhoestq/squad""", """plain_text"""),
("""lhoestq/test""", """default"""),
("""lhoestq/demo1""", """lhoestq--demo1"""),
("""dalle-mini/wit""", """dalle-mini--wit"""),
] , )
def test_get_dataset_config_names(path, expected):
    config_names = get_dataset_config_names(path)
    assert expected in config_names
@pytest.mark.parametrize(
"""path, expected_configs, expected_splits_in_first_config""" , [
("""squad""", ["""plain_text"""], ["""train""", """validation"""]),
("""dalle-mini/wit""", ["""dalle-mini--wit"""], ["""train"""]),
("""paws""", ["""labeled_final""", """labeled_swap""", """unlabeled_final"""], ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_info(path, expected_configs, expected_splits_in_first_config):
    infos = get_dataset_infos(path)
    assert list(infos.keys()) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits_in_first_config
@pytest.mark.parametrize(
"""path, expected_config, expected_splits""" , [
("""squad""", """plain_text""", ["""train""", """validation"""]),
("""dalle-mini/wit""", """dalle-mini--wit""", ["""train"""]),
("""paws""", """labeled_final""", ["""train""", """test""", """validation"""]),
] , )
def test_get_dataset_infos(path, expected_config, expected_splits):
    infos = get_dataset_infos(path)
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys()) == expected_splits
@pytest.mark.parametrize(
"""path, config_name, expected_exception""" , [
("""paws""", None, ValueError),
] , )
def test_get_dataset_split_names_error(path, config_name, expected_exception):
    with pytest.raises(expected_exception):
        get_dataset_split_names(path, config_name=config_name)
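# Quick interactive sketch of the inspected APIs, using the same public datasets as
# the parametrized cases above (requires network access):
if __name__ == "__main__":
    print(get_dataset_config_names("squad"))                           # ['plain_text']
    print(get_dataset_split_names("squad", config_name="plain_text"))  # ['train', 'validation']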
import warnings

from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401

warnings.warn(
    "The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
    " StableDiffusionImg2ImgPipeline` instead."
)
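# The replacement the deprecation message points to, as a minimal sketch (the
# checkpoint name below is illustrative, not prescribed by this shim):
if __name__ == "__main__":
    pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
    print(type(pipe).__name__)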
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DPR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class DPRConfig(PretrainedConfig):
    model_type = "dpr"

    def __init__(
        self,
        vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072,
        hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512,
        type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        position_embedding_type="absolute", projection_dim: int = 0, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
        self.position_embedding_type = position_embedding_type
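# Minimal construction sketch: all arguments are optional, and projection_dim=0
# (the default) means the encoder's pooled output is used without an extra
# projection layer. The small sizes here are illustrative only.
if __name__ == "__main__":
    config = DPRConfig(hidden_size=256, num_hidden_layers=4, projection_dim=128)
    print(config.model_type, config.hidden_size, config.projection_dim)  # dpr 256 128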
import argparse
from collections import defaultdict
import yaml
PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Cleans the model documentation table of content: removes exact duplicates and sorts entries by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add non-duplicate keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1

    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)

        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
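# Illustrative behaviour of clean_model_doc_toc on hypothetical entries: exact
# duplicates are collapsed and the result is sorted by title, case-insensitively.
#
#     >>> clean_model_doc_toc(
#     ...     [
#     ...         {"local": "model_doc/bert", "title": "BERT"},
#     ...         {"local": "model_doc/bert", "title": "BERT"},
#     ...         {"local": "model_doc/albert", "title": "ALBERT"},
#     ...     ]
#     ... )
#     [{'local': 'model_doc/albert', 'title': 'ALBERT'}, {'local': 'model_doc/bert', 'title': 'BERT'}]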
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class FillMaskPipeline(Pipeline):
    """Masked language modeling prediction pipeline, using any model with a masked language modeling head."""

    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]

            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}

        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids

        if top_k is not None:
            postprocess_params["top_k"] = top_k

        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(inputs, list) and len(inputs) == 1:
            return outputs[0]
        return outputs
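# End-user sketch of this pipeline (the checkpoint name is illustrative): the
# `pipeline` factory wires up the preprocess/_forward/postprocess steps above.
if __name__ == "__main__":
    from transformers import pipeline

    fill = pipeline("fill-mask", model="distilbert-base-uncased")
    for pred in fill("Paris is the [MASK] of France.", top_k=3):
        print(pred["token_str"], round(pred["score"], 3))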
from __future__ import annotations
from decimal import Decimal
from numpy import array
def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
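# Quick numerical check on a 2x2 example with determinant 1 (chosen so the exact
# inverse has integer entries):
if __name__ == "__main__":
    print(inverse_of_matrix([[2.0, 5.0], [1.0, 3.0]]))  # [[3.0, -5.0], [-1.0, 2.0]]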
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class FlaxStableDiffusion2PipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()

    def test_stable_diffusion_flax(self):
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            "stabilityai/stable-diffusion-2",
            revision="bf16",
            dtype=jnp.bfloat16,
        )

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
    def test_stable_diffusion_dpm_flax(self):
        model_id = "stabilityai/stable-diffusion-2"
        scheduler, scheduler_params = FlaxDPMSolverMultistepScheduler.from_pretrained(model_id, subfolder="scheduler")
        sd_pipe, params = FlaxStableDiffusionPipeline.from_pretrained(
            model_id,
            scheduler=scheduler,
            revision="bf16",
            dtype=jnp.bfloat16,
        )
        params["scheduler"] = scheduler_params

        prompt = "A painting of a squirrel eating a burger"
        num_samples = jax.device_count()
        prompt = num_samples * [prompt]

        prompt_ids = sd_pipe.prepare_inputs(prompt)

        params = replicate(params)
        prompt_ids = shard(prompt_ids)

        prng_seed = jax.random.PRNGKey(0)
        prng_seed = jax.random.split(prng_seed, jax.device_count())

        images = sd_pipe(prompt_ids, params, prng_seed, num_inference_steps=25, jit=True)[0]
        assert images.shape == (jax.device_count(), 1, 768, 768, 3)

        images = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:])
        image_slice = images[0, 253:256, 253:256, -1]

        output_slice = jnp.asarray(jax.device_get(image_slice.flatten()))
        expected_slice = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297])
        print(f"output_slice: {output_slice}")
        assert jnp.abs(output_slice - expected_slice).max() < 1e-2
import math
def proth(number: int) -> int:
    """Return the `number`-th Proth number (3, 5, 9, 13, 17, 25, ...)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # The block index bounds how many terms need to be generated below.
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3

        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
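# Structural sanity sketch: every Proth number has the form k * 2**n + 1 with odd
# k < 2**n. This checker is illustrative and not part of the original module.
def _is_proth_number(candidate: int) -> bool:
    k, n = candidate - 1, 0
    while k % 2 == 0:
        k //= 2
        n += 1
    return n > 0 and k < 2**n


if __name__ == "__main__":
    assert all(_is_proth_number(proth(i)) for i in range(1, 11))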
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8

        # DPR tok
        vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
# BART tok
        vocab = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))

    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
@require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
@slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"""who got the first nobel prize in physics""",
"""when is the next deadpool movie being released""",
"""which mode is used for short wave broadcast service""",
"""who is the owner of reading football club""",
"""when is the next scandal episode coming out""",
"""when is the last time the philadelphia won the superbowl""",
"""what is the most current adobe flash player version""",
"""how many episodes are there in dragon ball z""",
"""what is the first step in the evolution of the eye""",
"""where is gall bladder situated in human body""",
"""what is the main mineral in lithium batteries""",
"""who is the president of usa right now""",
"""where do the greasers live in the outsiders""",
"""panda is a national animal of which country""",
"""what is the name of manchester united stadium""",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
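# Hedged usage sketch: a RagTokenizer bundles a DPR question-encoder tokenizer
# with a BART generator tokenizer; plain calls tokenize with the question encoder.
if __name__ == "__main__":
    tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
    batch = tokenizer(["who wrote hamlet"], return_tensors="pt")
    print(batch["input_ids"].shape)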
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    """Output of the AutoencoderKL encoding method."""

    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    r"""Variational Autoencoder (VAE) model with KL loss."""

    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into overlapping tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
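# Minimal round-trip sketch with random data; the tiny single-block configuration
# is an illustrative assumption to keep the example fast, not a recommended setup.
if __name__ == "__main__":
    vae = AutoencoderKL(block_out_channels=(32,), norm_num_groups=32, sample_size=32)
    x = torch.randn(1, 3, 32, 32)
    posterior = vae.encode(x).latent_dist
    reconstruction = vae.decode(posterior.sample()).sample
    print(reconstruction.shape)  # torch.Size([1, 3, 32, 32])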
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

SPECIAL_CASES_TO_ALLOW = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
"CLIPSegConfig": True,
"DeformableDetrConfig": True,
"DetaConfig": True,
"DinatConfig": True,
"DonutSwinConfig": True,
"EfficientFormerConfig": True,
"FSMTConfig": True,
"JukeboxConfig": True,
"LayoutLMv2Config": True,
"MaskFormerSwinConfig": True,
"MT5Config": True,
"NatConfig": True,
"OneFormerConfig": True,
"PerceiverConfig": True,
"RagConfig": True,
"SpeechT5Config": True,
"SwinConfig": True,
"Swin2SRConfig": True,
"Swinv2Config": True,
"SwitchTransformersConfig": True,
"TableTransformerConfig": True,
"TapasConfig": True,
"TransfoXLConfig": True,
"UniSpeechConfig": True,
"UniSpeechSatConfig": True,
"WavLMConfig": True,
"WhisperConfig": True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
"JukeboxPriorConfig": True,
# TODO: @Younes (for `is_decoder`)
"Pix2StructTextConfig": True,
}
)
def A__ ( config_class , attributes , default_value , source_strings) -> Any:
__snake_case: Tuple = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
F'''config.{attribute}''' in modeling_source
or F'''getattr(config, "{attribute}"''' in modeling_source
or F'''getattr(self.config, "{attribute}"''' in modeling_source
):
__snake_case: Optional[Any] = True
# Deal with multi-line cases
elif (
re.search(
rF'''getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"''' , __lowerCAmelCase , )
is not None
):
__snake_case: List[Any] = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
__snake_case: Optional[Any] = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
__snake_case: List[str] = [
'''bos_index''',
'''eos_index''',
'''pad_index''',
'''unk_index''',
'''mask_index''',
'''image_size''',
'''use_cache''',
'''out_features''',
'''out_indices''',
]
__snake_case: Optional[int] = ['''encoder_no_repeat_ngram_size''']
# Special cases to be allowed
__snake_case: Tuple = True
if not attribute_used:
__snake_case: Optional[Any] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
__snake_case: Any = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
__snake_case: Optional[int] = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
__snake_case: List[str] = True
elif attribute.endswith("""_token_id"""):
__snake_case: Dict = True
# configuration class specific cases
if not case_allowed:
__snake_case: Tuple = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ , [])
__snake_case: Optional[Any] = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
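# Illustrative sketch (added; not part of the original script): the multi-line regex above
# also catches `getattr` calls that are split across lines. For a hypothetical attribute
# `hidden_size`:
#
#     import re
#     snippet = 'getattr(\n    self.config,\n    "hidden_size", None)'
#     pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
#     assert re.search(pattern, snippet) is not None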
def A__ ( config_class) -> str:
__snake_case: Union[str, Any] = dict(inspect.signature(config_class.__init__).parameters)
__snake_case: Optional[int] = [x for x in list(signature.keys()) if x not in ['''self''', '''kwargs''']]
__snake_case: List[Any] = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
__snake_case: Tuple = {}
if len(config_class.attribute_map) > 0:
__snake_case: List[Any] = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
__snake_case: Dict = inspect.getsourcefile(__lowerCAmelCase)
__snake_case: int = os.path.dirname(__lowerCAmelCase)
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
__snake_case: List[str] = [os.path.join(__lowerCAmelCase , __lowerCAmelCase) for fn in os.listdir(__lowerCAmelCase) if fn.startswith("""modeling_""")]
# Get the source code strings
__snake_case: List[Any] = []
for path in modeling_paths:
if os.path.isfile(__lowerCAmelCase):
with open(__lowerCAmelCase) as fp:
modeling_sources.append(fp.read())
__snake_case: int = []
for config_param, default_value in zip(__lowerCAmelCase , __lowerCAmelCase):
# `attributes` here is all the variant names for `config_param`
__snake_case: Tuple = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param])
if not check_attribute_being_used(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase):
unused_attributes.append(attributes[0])
return sorted(__lowerCAmelCase)
def A__ ( ) -> Optional[Any]:
__snake_case: Optional[Any] = {}
for _config_class in list(CONFIG_MAPPING.values()):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
__snake_case: Tuple = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class) , lambda SCREAMING_SNAKE_CASE__: inspect.isclass(__lowerCAmelCase)
and issubclass(__lowerCAmelCase , __lowerCAmelCase)
and inspect.getmodule(__lowerCAmelCase) == inspect.getmodule(_config_class) , )
]
for config_class in config_classes_in_module:
__snake_case: Dict = check_config_attributes_being_used(__lowerCAmelCase)
if len(__lowerCAmelCase) > 0:
__snake_case: List[str] = unused_attributes
if len(__lowerCAmelCase) > 0:
__snake_case: Union[str, Any] = '''The following configuration classes contain unused attributes in the corresponding modeling files:\n'''
for name, attributes in configs_with_unused_attributes.items():
error += F'''{name}: {attributes}\n'''
raise ValueError(__lowerCAmelCase)
if __name__ == "__main__":
check_config_attributes()
| 362
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """sew-d"""
def __init__( self : Dict , A : Any=32 , A : Dict=768 , A : Optional[Any]=12 , A : Union[str, Any]=12 , A : Union[str, Any]=3_072 , A : Optional[Any]=2 , A : Union[str, Any]=512 , A : List[Any]=256 , A : Dict=True , A : Union[str, Any]=True , A : Optional[int]=("p2c", "c2p") , A : str="layer_norm" , A : Dict="gelu_python" , A : Tuple=0.1 , A : Any=0.1 , A : Tuple=0.1 , A : Optional[int]=0.0 , A : Any=0.1 , A : Any=0.02 , A : Dict=1E-7 , A : str=1E-5 , A : int="group" , A : int="gelu" , A : str=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , A : Union[str, Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A : List[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A : Optional[int]=False , A : int=128 , A : int=16 , A : Optional[Any]=True , A : List[Any]=0.05 , A : Any=10 , A : Dict=2 , A : List[Any]=0.0 , A : Union[str, Any]=10 , A : int=0 , A : List[Any]="mean" , A : Union[str, Any]=False , A : Any=False , A : Optional[int]=256 , A : List[Any]=0 , A : Any=1 , A : List[Any]=2 , **A : List[Any] , ):
super().__init__(**A , pad_token_id=A , bos_token_id=A , eos_token_id=A )
__snake_case: Optional[int] = hidden_size
__snake_case: str = feat_extract_norm
__snake_case: int = feat_extract_activation
__snake_case: str = list(A )
__snake_case: Any = list(A )
__snake_case: str = list(A )
__snake_case: Union[str, Any] = conv_bias
__snake_case: int = num_conv_pos_embeddings
__snake_case: str = num_conv_pos_embedding_groups
__snake_case: List[Any] = len(self.conv_dim )
__snake_case: List[str] = num_hidden_layers
__snake_case: Union[str, Any] = intermediate_size
__snake_case: Dict = squeeze_factor
__snake_case: List[Any] = max_position_embeddings
__snake_case: List[Any] = position_buckets
__snake_case: List[str] = share_att_key
__snake_case: int = relative_attention
__snake_case: Union[str, Any] = norm_rel_ebd
__snake_case: List[str] = list(A )
__snake_case: Tuple = hidden_act
__snake_case: List[Any] = num_attention_heads
__snake_case: str = hidden_dropout
__snake_case: int = attention_dropout
__snake_case: Dict = activation_dropout
__snake_case: Any = feat_proj_dropout
__snake_case: int = final_dropout
__snake_case: List[Any] = layer_norm_eps
__snake_case: List[str] = feature_layer_norm_eps
__snake_case: List[Any] = initializer_range
__snake_case: List[Any] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. """
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, """
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f''' = {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case: List[Any] = apply_spec_augment
__snake_case: List[Any] = mask_time_prob
__snake_case: str = mask_time_length
__snake_case: List[str] = mask_time_min_masks
__snake_case: str = mask_feature_prob
__snake_case: Optional[int] = mask_feature_length
__snake_case: Dict = mask_feature_min_masks
# ctc loss
__snake_case: Any = ctc_loss_reduction
__snake_case: str = ctc_zero_infinity
# sequence classification
__snake_case: Optional[Any] = use_weighted_layer_sum
__snake_case: List[Any] = classifier_proj_size
@property
def UpperCAmelCase__ ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
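# Added note: this property multiplies all convolutional strides together; with the default
# `conv_stride` above that is 5 * 2**6 == 320, i.e. roughly one output frame per 320 input samples.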
| 293
| 0
|
class __snake_case :
'''simple docstring'''
def __init__( self : List[str] , A : Optional[int] ):
__snake_case: Tuple = len(lowerCAmelCase__ )
__snake_case: Optional[Any] = [0] * len_array
if len_array > 0:
__snake_case: List[str] = array[0]
for i in range(1 , lowerCAmelCase__ ):
__snake_case: List[str] = self.prefix_sum[i - 1] + array[i]
def UpperCAmelCase__ ( self : Any , A : List[str] , A : Dict ):
if start == 0:
return self.prefix_sum[end]
return self.prefix_sum[end] - self.prefix_sum[start - 1]
def UpperCAmelCase__ ( self : int , A : List[str] ):
__snake_case: List[Any] = {0}
for sum_item in self.prefix_sum:
if sum_item - target_sum in sums:
return True
sums.add(lowerCAmelCase__ )
return False
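# Illustrative usage (added; assuming the class and methods above correspond to the original
# `PrefixSum`, `get_sum(start, end)` and `contains_sum(target_sum)`):
#
#     ps = PrefixSum([1, 2, 3])   # prefix sums: [1, 3, 6]
#     ps.get_sum(1, 2)            # 2 + 3 == 5
#     ps.contains_sum(5)          # True: the subarray [2, 3] sums to 5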
if __name__ == "__main__":
import doctest
doctest.testmod()
| 363
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__UpperCAmelCase : Any = random.Random()
def A__ ( shape , scale=1.0 , rng=None , name=None) -> Any:
if rng is None:
__snake_case: Dict = global_rng
__snake_case: str = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
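# Added note: assuming the signature above, `floats_list((2, 3))` returns a 2-element list of
# 3 random floats each, drawn from `rng.random()` and scaled by `scale`.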
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , A : List[str] , A : List[Any]=7 , A : Optional[int]=400 , A : List[Any]=2_000 , A : Dict=2_048 , A : Tuple=128 , A : List[Any]=1 , A : Tuple=512 , A : str=30 , A : Optional[Any]=44_100 , ):
__snake_case: Dict = parent
__snake_case: Optional[Any] = batch_size
__snake_case: Optional[int] = min_seq_length
__snake_case: Optional[Any] = max_seq_length
__snake_case: List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
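# e.g. with the defaults above: (2_000 - 400) // (7 - 1) == 266, so successive inputs grow by 266 frames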
__snake_case: Any = spectrogram_length
__snake_case: Any = feature_size
__snake_case: Union[str, Any] = num_audio_channels
__snake_case: Any = hop_length
__snake_case: List[str] = chunk_length
__snake_case: Any = sampling_rate
def UpperCAmelCase__ ( self : List[Any] ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCAmelCase__ ( self : List[str] , A : str=False , A : int=False ):
def _flatten(A : Dict ):
return list(itertools.chain(*A ) )
if equal_length:
__snake_case: List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__snake_case: int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__snake_case: Tuple = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = TvltFeatureExtractor
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: str = TvltFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : int ):
__snake_case: Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A , """spectrogram_length""" ) )
self.assertTrue(hasattr(A , """feature_size""" ) )
self.assertTrue(hasattr(A , """num_audio_channels""" ) )
self.assertTrue(hasattr(A , """hop_length""" ) )
self.assertTrue(hasattr(A , """chunk_length""" ) )
self.assertTrue(hasattr(A , """sampling_rate""" ) )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: Tuple = feat_extract_first.save_pretrained(A )[0]
check_json_file_has_correct_format(A )
__snake_case: int = self.feature_extraction_class.from_pretrained(A )
__snake_case: List[str] = feat_extract_first.to_dict()
__snake_case: str = feat_extract_second.to_dict()
__snake_case: List[Any] = dict_first.pop("""mel_filters""" )
__snake_case: str = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: str = os.path.join(A , """feat_extract.json""" )
feat_extract_first.to_json_file(A )
__snake_case: List[Any] = self.feature_extraction_class.from_json_file(A )
__snake_case: Dict = feat_extract_first.to_dict()
__snake_case: Any = feat_extract_second.to_dict()
__snake_case: int = dict_first.pop("""mel_filters""" )
__snake_case: int = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Any ):
# Initialize feature_extractor
__snake_case: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__snake_case: Dict = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__snake_case: str = [np.asarray(A ) for speech_input in speech_inputs]
# Test not batched input
__snake_case: int = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__snake_case: Optional[int] = feature_extractor(A , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__snake_case: Union[str, Any] = feature_extractor(
A , return_tensors="""np""" , sampling_rate=44_100 , mask_audio=A ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__snake_case: Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__snake_case: Union[str, Any] = np.asarray(A )
__snake_case: List[Any] = feature_extractor(A , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCAmelCase__ ( self : Union[str, Any] , A : List[str] ):
__snake_case: Tuple = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__snake_case: List[Any] = ds.sort("""id""" ).select(range(A ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = self._load_datasamples(1 )
__snake_case: Optional[int] = TvltFeatureExtractor()
__snake_case: Optional[Any] = feature_extractor(A , return_tensors="""pt""" ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
__snake_case: str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , A , atol=1E-4 ) )
| 293
| 0
|
"""simple docstring"""
from __future__ import annotations
def A__ ( resistors) -> float:
__snake_case: str = 0.00
__snake_case: Any = 0
for resistor in resistors:
if resistor <= 0:
__snake_case: Optional[int] = F'''Resistor at index {index} has a negative or zero value!'''
raise ValueError(msg)
first_sum += 1 / float(resistor)
index += 1
return 1 / first_sum
def A__ ( resistors) -> float:
__snake_case: Any = 0.00
__snake_case: int = 0
for resistor in resistors:
sum_r += resistor
if resistor < 0:
__snake_case: Any = F'''Resistor at index {index} has a negative value!'''
raise ValueError(msg)
index += 1
return sum_r
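# Worked examples (added for clarity): two resistors of 2 and 4 ohms give
# 1 / (1/2 + 1/4) == 4/3 ≈ 1.33 ohms in parallel, and 2 + 4 == 6 ohms in series.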
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : List[Any] , A : AutoencoderKL , A : CLIPTextModel , A : CLIPTokenizer , A : UNetaDConditionModel , A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , A : StableDiffusionSafetyChecker , A : CLIPImageProcessor , ):
super().__init__()
self.register_modules(
vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , )
def UpperCAmelCase__ ( self : Optional[Any] , A : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case: Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def UpperCAmelCase__ ( self : str ):
self.enable_attention_slicing(A )
@torch.no_grad()
def __call__( self : List[str] , A : Union[str, List[str]] , A : int = 512 , A : int = 512 , A : int = 50 , A : float = 7.5 , A : Optional[Union[str, List[str]]] = None , A : Optional[int] = 1 , A : float = 0.0 , A : Optional[torch.Generator] = None , A : Optional[torch.FloatTensor] = None , A : Optional[str] = "pil" , A : bool = True , A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A : int = 1 , A : Optional[torch.FloatTensor] = None , **A : Optional[Any] , ):
if isinstance(A , A ):
__snake_case: int = 1
elif isinstance(A , A ):
__snake_case: Optional[Any] = len(A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(A )}.''' )
# get prompt text embeddings
__snake_case: Tuple = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case: Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case: List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case: Dict = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__snake_case: Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case: List[Any] = text_embeddings.shape
__snake_case: Tuple = text_embeddings.repeat(1 , A , 1 )
__snake_case: Dict = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case: List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case: List[str]
if negative_prompt is None:
__snake_case: Any = [""""""]
elif type(A ) is not type(A ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='''
f''' {type(A )}.''' )
elif isinstance(A , A ):
__snake_case: List[str] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case: str = negative_prompt
__snake_case: Any = text_input_ids.shape[-1]
__snake_case: Dict = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
__snake_case: Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case: Optional[Any] = uncond_embeddings.shape[1]
__snake_case: str = uncond_embeddings.repeat(A , A , 1 )
__snake_case: List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case: Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case: Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case: List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__snake_case: Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case: Any = torch.randn(
A , generator=A , device="""cpu""" , dtype=A ).to(self.device )
__snake_case: Tuple = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
__snake_case: Dict = torch.randn(
A , generator=A , device=self.device , dtype=A )
__snake_case: Optional[int] = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case: Optional[int] = latents_reference.to(self.device )
__snake_case: List[str] = latents.to(self.device )
# This is the key part of the pipeline, where we try to ensure that
# images generated with the same seed but at different sizes
# actually come out similar
__snake_case: int = (latents_shape[3] - latents_shape_reference[3]) // 2
__snake_case: Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
__snake_case: int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__snake_case: Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__snake_case: List[Any] = 0 if dx < 0 else dx
__snake_case: Dict = 0 if dy < 0 else dy
__snake_case: List[str] = max(-dx , 0 )
__snake_case: int = max(-dy , 0 )
__snake_case: List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case: str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case: Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler; it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case: Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case: int = {}
if accepts_eta:
__snake_case: Optional[Any] = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
__snake_case: str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case: Dict = self.scheduler.scale_model_input(A , A )
# predict the noise residual
__snake_case: List[Any] = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case: Any = noise_pred.chunk(2 )
__snake_case: Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case: str = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
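# scale and decode the latents with the VAE; 0.18215 is the standard Stable Diffusion latent scaling factor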
__snake_case: Optional[int] = 1 / 0.1_8215 * latents
__snake_case: List[Any] = self.vae.decode(A ).sample
__snake_case: str = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case: Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__snake_case: List[Any] = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to(
self.device )
__snake_case , __snake_case: List[str] = self.safety_checker(
images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__snake_case: Optional[int] = None
if output_type == "pil":
__snake_case: Tuple = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 293
| 0
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase : Optional[int] = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__UpperCAmelCase : Tuple = 250_004
__UpperCAmelCase : List[Any] = 250_020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = MBartaaTokenizer
lowerCAmelCase__ = MBartaaTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def UpperCAmelCase__ ( self : str ):
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case: Optional[Any] = MBartaaTokenizer(lowerCAmelCase_ , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[Any] = """<s>"""
__snake_case: Union[str, Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ) , lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ) , lowerCAmelCase_ )
def UpperCAmelCase__ ( self : Any ):
__snake_case: List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(lowerCAmelCase_ ) , 1_054 )
def UpperCAmelCase__ ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_054 )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[int] = MBartaaTokenizer(lowerCAmelCase_ , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=lowerCAmelCase_ )
__snake_case: List[Any] = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCAmelCase_ , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__snake_case: Tuple = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCAmelCase_ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
__snake_case: Optional[Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case: int = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_ , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
# fmt: off
__snake_case: List[str] = {"""input_ids""": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_ , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def UpperCAmelCase__ ( self : Union[str, Any] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__snake_case: Union[str, Any] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case: Optional[int] = self.rust_tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
__snake_case: List[str] = self.tokenizer_class.from_pretrained(lowerCAmelCase_ , **lowerCAmelCase_ )
__snake_case: Optional[int] = tempfile.mkdtemp()
__snake_case: Optional[Any] = tokenizer_r.save_pretrained(lowerCAmelCase_ )
__snake_case: List[Any] = tokenizer_p.save_pretrained(lowerCAmelCase_ )
# Checks it saves the same files plus the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__snake_case: Tuple = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Checks everything loads correctly in the same way
__snake_case: Optional[Any] = tokenizer_r.from_pretrained(lowerCAmelCase_ )
__snake_case: Dict = tokenizer_p.from_pretrained(lowerCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(lowerCAmelCase_ )
# Save tokenizer rust, legacy_format=True
__snake_case: Union[str, Any] = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(lowerCAmelCase_ , legacy_format=lowerCAmelCase_ )
__snake_case: List[str] = tokenizer_p.save_pretrained(lowerCAmelCase_ )
# Checks it saves the same files
self.assertSequenceEqual(lowerCAmelCase_ , lowerCAmelCase_ )
# Checks everything loads correctly in the same way
__snake_case: str = tokenizer_r.from_pretrained(lowerCAmelCase_ )
__snake_case: Optional[Any] = tokenizer_p.from_pretrained(lowerCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
shutil.rmtree(lowerCAmelCase_ )
# Save tokenizer rust, legacy_format=False
__snake_case: Optional[Any] = tempfile.mkdtemp()
__snake_case: str = tokenizer_r.save_pretrained(lowerCAmelCase_ , legacy_format=lowerCAmelCase_ )
__snake_case: int = tokenizer_p.save_pretrained(lowerCAmelCase_ )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__snake_case: int = tokenizer_r.from_pretrained(lowerCAmelCase_ )
__snake_case: int = tokenizer_p.from_pretrained(lowerCAmelCase_ )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCAmelCase_ , lowerCAmelCase_ ) )
shutil.rmtree(lowerCAmelCase_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = '''facebook/mbart-large-50-one-to-many-mmt'''
lowerCAmelCase__ = [
''' UN Chief Says There Is No Military Solution in Syria''',
''' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.''',
]
lowerCAmelCase__ = [
'''Şeful ONU declară că nu există o soluţie militară în Siria''',
'''Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'''
''' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'''
''' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.''',
]
lowerCAmelCase__ = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def UpperCAmelCase__ ( cls : List[str] ):
__snake_case: Tuple = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
__snake_case: List[str] = 1
return cls
def UpperCAmelCase__ ( self : Optional[int] ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250_020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 250_038 )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[Any] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
def UpperCAmelCase__ ( self : str ):
self.assertIn(lowerCAmelCase_ , self.tokenizer.all_special_ids )
__snake_case: int = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
__snake_case: Optional[Any] = self.tokenizer.decode(lowerCAmelCase_ , skip_special_tokens=lowerCAmelCase_ )
__snake_case: Optional[int] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=lowerCAmelCase_ )
self.assertEqual(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertNotIn(self.tokenizer.eos_token , lowerCAmelCase_ )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Tuple = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , lowerCAmelCase_ )
__snake_case: List[Any] = 10
__snake_case: Optional[Any] = self.tokenizer(lowerCAmelCase_ , max_length=lowerCAmelCase_ , truncation=lowerCAmelCase_ ).input_ids[0]
self.assertEqual(ids[0] , lowerCAmelCase_ )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(lowerCAmelCase_ ) , lowerCAmelCase_ )
def UpperCAmelCase__ ( self : Any ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250_053, 250_001] )
def UpperCAmelCase__ ( self : str ):
__snake_case: Union[str, Any] = tempfile.mkdtemp()
__snake_case: Optional[int] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCAmelCase_ )
__snake_case: str = MBartaaTokenizer.from_pretrained(lowerCAmelCase_ )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , lowerCAmelCase_ )
@require_torch
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , return_tensors="""pt""" )
__snake_case: Tuple = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
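# shift_tokens_right wraps the last non-padding token (EOS) around to position 0,
# which is what the `[2, RO_CODE]` decoder prefix assertion below checks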
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[str] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__snake_case: List[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(lowerCAmelCase_ , lowerCAmelCase_ )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__snake_case: int = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , lowerCAmelCase_ )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = self.tokenizer(self.src_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=3 , return_tensors="""pt""" )
__snake_case: List[str] = self.tokenizer(
text_target=self.tgt_text , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=10 , return_tensors="""pt""" )
__snake_case: int = targets["""input_ids"""]
__snake_case: str = shift_tokens_right(lowerCAmelCase_ , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase__ ( self : int ):
__snake_case: str = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , {
# en_XX, A, test, EOS
"""input_ids""": [[250_004, 62, 3_034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250_001,
} , )
| 365
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__UpperCAmelCase : Optional[int] = "\\n\n"
__UpperCAmelCase : Tuple = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
__UpperCAmelCase : Tuple = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
def UpperCAmelCase__ ( self : int , A : str , A : Optional[Any] , A : int = 16 , A : bool = True , A : Optional[int]=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__snake_case: Optional[Any] = """cuda"""
else:
__snake_case: str = """cuda""" if torch.cuda.is_available() else """cpu"""
__snake_case: Dict = AutoModelForCausalLM.from_pretrained(A )
__snake_case: List[str] = model.to(A )
__snake_case: Optional[Any] = AutoTokenizer.from_pretrained(A )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__snake_case: Dict = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(A ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__snake_case: Tuple = model.config.max_length - 1
else:
__snake_case: Optional[Any] = model.config.max_length
__snake_case: Optional[int] = tokenizer(
A , add_special_tokens=A , padding=A , truncation=A , max_length=A , return_tensors="""pt""" , return_attention_mask=A , ).to(A )
__snake_case: Tuple = encodings["""input_ids"""]
__snake_case: Any = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__snake_case: Optional[int] = []
__snake_case: Optional[int] = CrossEntropyLoss(reduction="""none""" )
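# per sequence, perplexity = exp( sum(token NLL * attention mask) / sum(attention mask) ) over the shifted tokens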
for start_index in logging.tqdm(range(0 , len(A ) , A ) ):
__snake_case: Dict = min(start_index + batch_size , len(A ) )
__snake_case: Optional[int] = encoded_texts[start_index:end_index]
__snake_case: List[Any] = attn_masks[start_index:end_index]
if add_start_token:
__snake_case: Tuple = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(A )
__snake_case: Optional[Any] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
__snake_case: Union[str, Any] = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(A ), attn_mask] , dim=1 )
__snake_case: List[str] = encoded_batch
with torch.no_grad():
__snake_case: Union[str, Any] = model(A , attention_mask=A ).logits
__snake_case: List[str] = out_logits[..., :-1, :].contiguous()
__snake_case: Optional[Any] = labels[..., 1:].contiguous()
__snake_case: Dict = attn_mask[..., 1:].contiguous()
__snake_case: Optional[Any] = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , A ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(A )}
| 293
| 0
|
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
__UpperCAmelCase : Any = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
__UpperCAmelCase : Optional[Any] = subprocess.check_output(f'git diff --name-only {fork_point_sha}'.split()).decode("utf-8").split()
__UpperCAmelCase : List[Any] = '|'.join(sys.argv[1:])
__UpperCAmelCase : int = re.compile(Rf'^({joined_dirs}).*?\.py$')
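# e.g. invoked as `python utils/get_modified_files.py utils src`, the regex matches paths such as "src/transformers/foo.py"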
__UpperCAmelCase : int = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 366
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase : List[str] = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Tuple = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[int] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 293
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase : int = {
"configuration_timesformer": ["TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "TimesformerConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_timesformer"] = [
"TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TimesformerModel",
"TimesformerForVideoClassification",
"TimesformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_timesformer import TIMESFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, TimesformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_timesformer import (
TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimesformerForVideoClassification,
TimesformerModel,
TimesformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 367
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester ( ConfigTester ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Optional[int] ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , """hidden_sizes""" ) )
        self.parent.assertTrue(hasattr(config , """neck_hidden_sizes""" ) )
        self.parent.assertTrue(hasattr(config , """num_attention_heads""" ) )
class MobileViTModelTester :
'''simple docstring'''
def __init__( self : int , A : str , A : Dict=13 , A : str=32 , A : Any=2 , A : Optional[Any]=3 , A : str=640 , A : Tuple=4 , A : Dict="silu" , A : List[Any]=3 , A : Any=32 , A : Any=0.1 , A : int=0.1 , A : Dict=0.1 , A : Optional[Any]=0.02 , A : List[Any]=True , A : Tuple=True , A : Any=10 , A : Optional[int]=None , ):
__snake_case: List[Any] = parent
__snake_case: Dict = batch_size
__snake_case: int = image_size
__snake_case: Tuple = patch_size
__snake_case: Tuple = num_channels
__snake_case: str = last_hidden_size
__snake_case: Dict = num_attention_heads
__snake_case: Dict = hidden_act
__snake_case: Tuple = conv_kernel_size
__snake_case: List[str] = output_stride
__snake_case: List[str] = hidden_dropout_prob
__snake_case: Optional[Any] = attention_probs_dropout_prob
__snake_case: int = classifier_dropout_prob
__snake_case: List[Any] = use_labels
__snake_case: Union[str, Any] = is_training
__snake_case: Union[str, Any] = num_labels
__snake_case: str = initializer_range
__snake_case: List[Any] = scope
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case: Tuple = None
__snake_case: Any = None
if self.use_labels:
__snake_case: Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case: str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case: Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase__ ( self : int ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : str , A : Optional[Any] , A : Any , A : Any , A : Union[str, Any] ):
__snake_case: List[Any] = MobileViTModel(config=A )
model.to(A )
model.eval()
__snake_case: int = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : str , A : List[Any] , A : Any , A : Any , A : int ):
__snake_case: str = self.num_labels
__snake_case: Optional[int] = MobileViTForImageClassification(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] , A : str , A : Optional[Any] , A : int , A : str ):
__snake_case: List[Any] = self.num_labels
__snake_case: Dict = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case: Tuple = model(A , labels=A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : Dict ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels , pixel_labels = config_and_inputs
        inputs_dict = {"""pixel_values""": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
def UpperCAmelCase__ ( self : List[str] ):
        self.model_tester = MobileViTModelTester(self )
        self.config_tester = MobileViTConfigTester(self , config_class=MobileViTConfig , has_text_modality=False )
def UpperCAmelCase__ ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : List[Any] ):
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def UpperCAmelCase__ ( self : Dict ):
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def UpperCAmelCase__ ( self : Optional[Any] ):
pass
def UpperCAmelCase__ ( self : str ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["""pixel_values"""]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase__ ( self : Dict ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states ) , expected_num_stages )
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["""output_hidden_states"""] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
def UpperCAmelCase__ ( self : int ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case: List[Any] = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def prepare_img():
    image = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
    return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
    def default_image_processor( self : Dict ):
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : List[Any] ):
        model = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Tuple ):
        model = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Dict ):
        model = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        model = model.to(torch_device )
        image_processor = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="""pt""" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs , target_sizes=[(50, 60)] )
        expected_shape = torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , expected_shape )
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs )
        expected_shape = torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , expected_shape )
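# --- Hedged illustration (not part of the original file) --------------------
# The spatial-size arithmetic the hidden-states check above relies on, as a
# standalone sketch (the function name is illustrative): MobileViT reports 5
# stages whose feature maps halve in height/width each stage, so output_stride
# must equal the final divisor / 2.
def expected_hidden_state_sizes(image_size: int = 32, num_stages: int = 5) -> list:
    sizes, divisor = [], 2
    for _ in range(num_stages):
        sizes.append((image_size // divisor, image_size // divisor))
        divisor *= 2
    return sizes  # image_size=32 -> [(16, 16), (8, 8), (4, 4), (2, 2), (1, 1)]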
| 293
| 0
|
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class TvltFeatureExtractor ( SequenceFeatureExtractor ):
'''simple docstring'''
lowerCAmelCase__ = ["""audio_values""", """audio_mask"""]
def __init__( self : Union[str, Any] , A : Any=2_048 , A : Any=1 , A : Optional[int]=[16, 16] , A : str=128 , A : int=44_100 , A : Any=86 , A : str=2_048 , A : Optional[Any]=0.0 , **A : Dict , ):
super().__init__(
feature_size=A , sampling_rate=A , padding_value=A , **A , )
__snake_case: str = spectrogram_length
__snake_case: Optional[int] = num_channels
__snake_case: Optional[Any] = patch_size
__snake_case: Any = feature_size // self.patch_size[1]
__snake_case: Optional[int] = n_fft
__snake_case: Optional[int] = sampling_rate // hop_length_to_sampling_rate
__snake_case: Optional[Any] = sampling_rate
__snake_case: Optional[int] = padding_value
__snake_case: Tuple = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=A , min_frequency=0.0 , max_frequency=22_050.0 , sampling_rate=A , norm="""slaney""" , mel_scale="""slaney""" , ).T
def UpperCAmelCase__ ( self : List[str] , A : Tuple ):
__snake_case: Tuple = spectrogram(
A , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
__snake_case: List[Any] = log_spec[:, :-1]
__snake_case: Union[str, Any] = log_spec - 20.0
__snake_case: Any = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : int , A : Tuple , A : Tuple = None , A : Dict = True , A : int = None , A : int = False , A : List[Any] = False , **A : List[str] , ):
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
__snake_case: int = isinstance(A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
__snake_case: Tuple = is_batched_numpy or (
isinstance(A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__snake_case: Dict = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(A , np.ndarray ):
__snake_case: Optional[int] = np.asarray(A , dtype=np.floataa )
elif isinstance(A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__snake_case: Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__snake_case: Any = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__snake_case: Optional[Any] = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , A ):
__snake_case: Tuple = [np.asarray(A , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__snake_case: Dict = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__snake_case: str = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__snake_case: Any = np.array(A ).astype(np.floataa )
# convert into correct format for padding
__snake_case: Optional[int] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__snake_case: List[Any] = np.ones([len(A ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__snake_case: Tuple = padded_audio_features * self.padding_value
for i in range(len(A ) ):
__snake_case: Any = audio_features[i]
__snake_case: Any = feature
# return as BatchFeature
if return_attention_mask:
            __snake_case: Union[str, Any] = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
        else:
            __snake_case: Dict = {"""audio_values""": padded_audio_features}
__snake_case: Tuple = BatchFeature(data=A , tensor_type=A )
return encoded_inputs
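# --- Hedged illustration (not part of the original file) --------------------
# A simplified sketch of the padding/masking logic in __call__ above (the
# function name and parameters are assumptions): each log-mel spectrogram of
# shape (time, freq) contributes ceil(time / patch_time) * freq_len patches;
# shorter clips are padded with padding_value and masked out.
def pad_audio_features_sketch(features, patch_time, freq_len, feature_size, padding_value=0.0):
    max_patches = max(ceil(f.shape[0] / patch_time) * freq_len for f in features)
    max_time = max_patches // freq_len * patch_time
    padded = np.full((len(features), 1, max_time, feature_size), padding_value, dtype=np.float32)
    mask = np.zeros((len(features), max_patches), dtype=np.float32)
    for i, f in enumerate(features):
        padded[i, 0, : f.shape[0]] = f
        mask[i, : ceil(f.shape[0] / patch_time) * freq_len] = 1.0
    return padded, mask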
| 368
|
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: str = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetDownsampleBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Union[str, Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: Union[str, Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : List[str] ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : int ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Any ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[Any] ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : int ):
__snake_case: str = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: str = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[str] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaD # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
__snake_case: List[str] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Tuple = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : str ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetUpsampleBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[Any] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A , include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : Dict ):
        init_dict , inputs_dict = super().prepare_init_args_and_inputs_for_common()
        init_dict["""cross_attention_dim"""] = 32
        return init_dict, inputs_dict
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : str ):
__snake_case: Union[str, Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Any = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : int ):
__snake_case: Any = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(A )
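# --- Hedged illustration (not part of the original file) --------------------
# Every test class above hands the mixin's test_output() a 9-value
# expected_slice. A sketch of that comparison, under the assumption that the
# slice is the flattened last 3x3 patch of the final channel of the block's
# output (the helper name and atol are illustrative):
import torch


def check_output_slice(output: torch.Tensor, expected_slice: list, atol: float = 5e-3) -> bool:
    actual = output[0, -1, -3:, -3:].flatten()
    return torch.allclose(actual, torch.tensor(expected_slice), atol=atol)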
| 293
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
__UpperCAmelCase : List[Any] = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 369
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests ( OnnxPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
    def gpu_provider( self : Dict ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options( self : Dict ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__snake_case: Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__snake_case: List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
__snake_case: int = """A red cat sitting on a park bench"""
__snake_case: Any = np.random.RandomState(0 )
__snake_case: Optional[Any] = pipe(
prompt=A , image=A , mask_image=A , guidance_scale=7.5 , num_inference_steps=10 , generator=A , output_type="""np""" , )
__snake_case: List[Any] = output.images
__snake_case: str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__snake_case: Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__snake_case: Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__snake_case: Optional[int] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
__snake_case: List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=A , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
__snake_case: Optional[int] = """A red cat sitting on a park bench"""
__snake_case: Dict = np.random.RandomState(0 )
__snake_case: Optional[Any] = pipe(
prompt=A , image=A , mask_image=A , guidance_scale=7.5 , num_inference_steps=20 , generator=A , output_type="""np""" , )
__snake_case: List[str] = output.images
__snake_case: str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__snake_case: Union[str, Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
| 293
| 0
|
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def is_compiled_module(module) -> bool:
    if is_torch_version("""<""" , """2.0.0""") or not hasattr(torch , """_dynamo"""):
        return False
    return isinstance(module , torch._dynamo.eval_frame.OptimizedModule)
def extract_model_from_parallel(model , keep_fp32_wrapper: bool = True):
    options = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    is_compiled = is_compiled_module(model)
    if is_compiled:
        compiled_model = model
        model = model._orig_mod
    if is_deepspeed_available():
        options += (DeepSpeedEngine,)
    while isinstance(model , options):
        model = model.module
    if not keep_fp32_wrapper:
        forward = getattr(model , """forward""")
        original_forward = model.__dict__.pop("""_original_forward""" , None)
        if original_forward is not None:
            while hasattr(forward , """__wrapped__"""):
                forward = forward.__wrapped__
                if forward == original_forward:
                    break
            model.forward = forward
    if getattr(model , """_converted_to_transformer_engine""" , False):
        convert_model(model , to_transformer_engine=False)
    if is_compiled:
        compiled_model._orig_mod = model
        model = compiled_model
    return model
def wait_for_everyone() -> None:
    PartialState().wait_for_everyone()
def save(obj , f) -> None:
    if PartialState().distributed_type == DistributedType.TPU:
        xm.save(obj , f)
    elif PartialState().local_process_index == 0:
        torch.save(obj , f)
@contextmanager
def patch_environment(**kwargs):
    for key, value in kwargs.items():
        os.environ[key.upper()] = str(value)
    yield
    for key in kwargs:
        if key.upper() in os.environ:
            del os.environ[key.upper()]
def get_pretty_name(obj) -> str:
    if not hasattr(obj , """__qualname__""") and not hasattr(obj , """__name__"""):
        obj = getattr(obj , """__class__""" , obj)
    if hasattr(obj , """__qualname__"""):
        return obj.__qualname__
    if hasattr(obj , """__name__"""):
        return obj.__name__
    return str(obj)
def merge_dicts(source , destination) -> dict:
    for key, value in source.items():
        if isinstance(value , dict):
            node = destination.setdefault(key , {})
            merge_dicts(value , node)
        else:
            destination[key] = value
    return destination
def is_port_in_use(port = None) -> bool:
    if port is None:
        port = 2_9500
    with socket.socket(socket.AF_INET , socket.SOCK_STREAM) as s:
        return s.connect_ex(("""localhost""", port)) == 0
| 370
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits , str):
        raise TypeError("""number of qubits must be an integer.""")
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("""number of qubits must be exact integer.""")
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate(>10).""")
    qr = QuantumRegister(number_of_qubits , """qr""")
    cr = ClassicalRegister(number_of_qubits , """cr""")
    quantum_circuit = QuantumCircuit(qr , cr)
    counter = number_of_qubits
    for i in range(number_of_qubits):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            # controlled phase rotation of pi / 2**(counter - j) between qubits j and counter
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k , number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr , cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("""qasm_simulator""")
    job = execute(quantum_circuit , backend , shots=1_0000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
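# Hedged sanity check: starting from |000>, the QFT produces an equal
# superposition, so the 10000 shots should split roughly uniformly across the
# 8 basis states (~1250 counts each).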
| 293
| 0
|
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester :
'''simple docstring'''
def __init__( self : Dict , A : Optional[Any] , A : Optional[Any]=13 , A : List[str]=7 , A : List[str]=True , A : Optional[int]=True , A : List[Any]=True , A : Tuple=True , A : Optional[int]=99 , A : Any=64 , A : Union[str, Any]=32 , A : Optional[int]=5 , A : Optional[Any]=4 , A : Optional[int]=37 , A : Union[str, Any]="gelu" , A : Optional[Any]=0.1 , A : List[Any]=0.1 , A : Tuple=512 , A : int=16 , A : List[str]=2 , A : List[str]=0.02 , A : Tuple=3 , A : Optional[int]=4 , A : Dict=None , ):
__snake_case: Optional[Any] = parent
__snake_case: Optional[int] = batch_size
__snake_case: Tuple = seq_length
__snake_case: List[str] = is_training
__snake_case: List[Any] = use_input_mask
__snake_case: str = use_token_type_ids
__snake_case: int = use_labels
__snake_case: str = vocab_size
__snake_case: int = hidden_size
__snake_case: Optional[Any] = embedding_size
__snake_case: List[str] = num_hidden_layers
__snake_case: List[str] = num_attention_heads
__snake_case: int = intermediate_size
__snake_case: Optional[int] = hidden_act
__snake_case: Tuple = hidden_dropout_prob
__snake_case: Any = attention_probs_dropout_prob
__snake_case: List[str] = max_position_embeddings
__snake_case: Any = type_vocab_size
__snake_case: Optional[int] = type_sequence_label_size
__snake_case: Optional[Any] = initializer_range
__snake_case: Optional[int] = num_labels
__snake_case: Optional[Any] = num_choices
__snake_case: Any = scope
def UpperCAmelCase__ ( self : int ):
__snake_case: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case: Any = None
if self.use_input_mask:
__snake_case: List[str] = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case: Any = None
if self.use_token_type_ids:
__snake_case: Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case: Dict = None
__snake_case: Optional[Any] = None
__snake_case: List[str] = None
if self.use_labels:
__snake_case: Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case: Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case: str = ids_tensor([self.batch_size] , self.num_choices )
__snake_case: Tuple = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : List[Any] ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Tuple , A : Optional[int] , A : str , A : Dict , A : int , A : int , A : Any , A : Optional[Any] ):
__snake_case: Dict = MobileBertModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case: Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
__snake_case: Optional[int] = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
__snake_case: Dict = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self : int , A : Union[str, Any] , A : Union[str, Any] , A : int , A : List[str] , A : List[str] , A : Tuple , A : Optional[int] ):
__snake_case: str = MobileBertForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case: List[str] = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : Tuple , A : List[Any] , A : Optional[Any] , A : Any , A : Optional[Any] , A : int , A : List[str] , A : List[str] ):
__snake_case: Tuple = MobileBertForNextSentencePrediction(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case: str = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def UpperCAmelCase__ ( self : Tuple , A : Any , A : List[str] , A : str , A : str , A : Tuple , A : int , A : int ):
__snake_case: Any = MobileBertForPreTraining(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case: str = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , next_sentence_label=__lowerCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def UpperCAmelCase__ ( self : Tuple , A : Any , A : List[str] , A : Dict , A : int , A : Optional[int] , A : Optional[Any] , A : List[str] ):
__snake_case: str = MobileBertForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case: int = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : Any , A : Tuple , A : Tuple , A : Any , A : Optional[int] , A : int , A : Dict , A : Dict ):
__snake_case: Optional[Any] = self.num_labels
__snake_case: Optional[Any] = MobileBertForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case: int = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : List[str] , A : List[str] , A : str , A : int , A : str , A : Union[str, Any] , A : int , A : int ):
__snake_case: str = self.num_labels
__snake_case: List[str] = MobileBertForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case: Dict = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Dict , A : Optional[int] , A : List[Any] , A : List[Any] , A : Tuple , A : str , A : List[str] , A : str ):
__snake_case: Optional[Any] = self.num_choices
__snake_case: Any = MobileBertForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__snake_case: int = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case: Optional[int] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case: Optional[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__snake_case: Optional[Any] = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self : str ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class MobileBertModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": MobileBertModel,
"""fill-mask""": MobileBertForMaskedLM,
"""question-answering""": MobileBertForQuestionAnswering,
"""text-classification""": MobileBertForSequenceClassification,
"""token-classification""": MobileBertForTokenClassification,
"""zero-shot""": MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
def UpperCAmelCase__ ( self : Optional[int] , A : Optional[int] , A : str , A : Dict=False ):
__snake_case: Any = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
__snake_case: Any = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase )
__snake_case: Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
return inputs_dict
def UpperCAmelCase__ ( self : Tuple ):
        self.model_tester = MobileBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MobileBertConfig , hidden_size=37 )
def UpperCAmelCase__ ( self : Dict ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__lowerCAmelCase )
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__lowerCAmelCase )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__lowerCAmelCase )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__lowerCAmelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__lowerCAmelCase )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__lowerCAmelCase )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__lowerCAmelCase )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__lowerCAmelCase )
def _long_tensor(tok_lst):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1e-3
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase__ ( self : Any ):
        model = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(torch_device )
        input_ids = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 9, 512) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
[
[
[-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05],
[-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00],
[2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01],
]
            ] , device=torch_device , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
        lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
        upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound )
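# --- Hedged illustration (not part of the original file) --------------------
# The ratio-based comparison used above, as a reusable helper (the name is
# illustrative): values spanning roughly 10e0 to 10e8 are compared
# multiplicatively rather than by absolute difference.
def close_by_ratio(expected , actual , tolerance=TOLERANCE) -> bool:
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tolerance) and torch.all(ratio <= 1 + tolerance))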
| 371
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 293
| 0
|
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
logger = logging.get_logger(__name__)
class MobileViTFeatureExtractor ( MobileViTImageProcessor ):
    def __init__( self : Any , *args , **kwargs ):
        warnings.warn(
            """The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
            """ Please use MobileViTImageProcessor instead.""" , FutureWarning , )
        super().__init__(*args , **kwargs )
| 350
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringTrainer ( Trainer ):
'''simple docstring'''
    def __init__( self : str , *args , eval_examples=None , post_process_function=None , **kwargs ):
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
def UpperCAmelCase__ ( self : List[Any] , A : Dict=None , A : int=None , A : List[Any]=None , A : str = "eval" ):
__snake_case: int = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case: Any = self.get_eval_dataloader(A )
__snake_case: Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case: Union[str, Any] = self.compute_metrics
__snake_case: List[str] = None
__snake_case: Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__snake_case: Tuple = time.time()
try:
__snake_case: Any = eval_loop(
A , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , )
finally:
__snake_case: Optional[int] = compute_metrics
__snake_case: Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__snake_case: List[str] = self.post_process_function(A , A , output.predictions )
__snake_case: List[Any] = self.compute_metrics(A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__snake_case: str = metrics.pop(A )
metrics.update(output.metrics )
else:
__snake_case: List[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(A )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__snake_case: str = self.callback_handler.on_evaluate(self.args , self.state , self.control , A )
return metrics
    def predict(self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test"):
        predict_dataloader = self.get_test_dataloader(predict_dataset)
        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        start_time = time.time()
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix, start_time, num_samples=output.num_samples, num_steps=math.ceil(output.num_samples / total_batch_size), ) )
        if self.post_process_function is None or self.compute_metrics is None:
            return output
        predictions = self.post_process_function(predict_examples, predict_dataset, output.predictions, "predict")
        metrics = self.compute_metrics(predictions)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
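# A minimal usage sketch (hypothetical objects: model, training_args, datasets,
# and a post-processing function that turns start/end logits into answer text):
#
#   trainer = QuestionAnsweringTrainer(
#       model=model,
#       args=training_args,
#       train_dataset=train_dataset,
#       eval_dataset=eval_dataset,
#       eval_examples=eval_examples,
#       post_process_function=post_processing_function,
#       compute_metrics=compute_metrics,
#   )
#   metrics = trainer.evaluate()  # metric keys arrive prefixed with "eval_"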
| 293
| 0
|
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
"good first issue",
"good second issue",
"good difficult issue",
"feature request",
"new model",
"wip",
]
def main() -> None:
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")
    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="""closed""")
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"""This issue has been automatically marked as stale because it has not had """
"""recent activity. If you think this still needs to be addressed """
"""please comment on this thread.\n\nPlease note that issues that do not follow the """
"""[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) """
"""are likely to be ignored.""")
if __name__ == "__main__":
main()
| 351
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
RWKV_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class RwkvConfig(PretrainedConfig):
    """Configuration class to store the configuration of an RWKV model."""
    model_type = "rwkv"
    attribute_map = {"max_position_embeddings": "context_length"}
    def __init__(
        self, vocab_size=50_277, context_length=1_024, hidden_size=4_096, num_hidden_layers=32,
        attention_hidden_size=None, intermediate_size=None, layer_norm_epsilon=1e-5, bos_token_id=0,
        eos_token_id=0, rescale_every=6, tie_word_embeddings=False, use_cache=True, **kwargs, ):
        self.vocab_size = vocab_size
        self.context_length = context_length
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        # Optional sizes fall back to values derived from hidden_size.
        self.attention_hidden_size = attention_hidden_size if attention_hidden_size is not None else hidden_size
        self.intermediate_size = intermediate_size if intermediate_size is not None else 4 * hidden_size
        self.layer_norm_epsilon = layer_norm_epsilon
        self.rescale_every = rescale_every
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(
            tie_word_embeddings=tie_word_embeddings, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs )
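# A minimal usage sketch: the two optional sizes default from hidden_size.
if __name__ == "__main__":
    config = RwkvConfig(hidden_size=512)
    assert config.attention_hidden_size == 512   # defaults to hidden_size
    assert config.intermediate_size == 4 * 512   # defaults to 4 * hidden_size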
| 293
| 0
|
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_copies.py
DIFFUSERS_PATH = "src/diffusers"
REPO_PATH = "."
# This is to make sure the diffusers module imported is the one in the repo.
spec = importlib.util.spec_from_file_location(
    "diffusers",
    os.path.join(DIFFUSERS_PATH, "__init__.py"),
    submodule_search_locations=[DIFFUSERS_PATH],
)
diffusers = spec.loader.load_module()
def _should_continue(line: str, indent: str) -> bool:
    return line.startswith(indent) or len(line) <= 1 or re.search(r"^\s*\)(\s*->.*:|:)\s*$", line) is not None
def find_code_in_diffusers(object_name: str) -> str:
    """Find and return the source code of `object_name`."""
    parts = object_name.split(".")
    i = 0
    # First let's find the module where our object lives.
    module = parts[i]
    while i < len(parts) and not os.path.isfile(os.path.join(DIFFUSERS_PATH, f"{module}.py")):
        i += 1
        if i < len(parts):
            module = os.path.join(module, parts[i])
    if i >= len(parts):
        raise ValueError(f"`object_name` should begin with the name of a module of diffusers but got {object_name}.")
    with open(os.path.join(DIFFUSERS_PATH, f"{module}.py"), "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Now let's find the class / func in the code!
    indent = ""
    line_index = 0
    for name in parts[i + 1 :]:
        while (
            line_index < len(lines) and re.search(rf"^{indent}(class|def)\s+{name}(\(|\:)", lines[line_index]) is None
        ):
            line_index += 1
        indent += "    "
        line_index += 1
    if line_index >= len(lines):
        raise ValueError(f" {object_name} does not match any function or class in {module}.")
    # We found the beginning of the class / func, now let's find the end (when the indent diminishes).
    start_index = line_index
    while line_index < len(lines) and _should_continue(lines[line_index], indent):
        line_index += 1
    # Clean up empty lines at the end (if any).
    while len(lines[line_index - 1]) <= 1:
        line_index -= 1
    code_lines = lines[start_index:line_index]
    return "".join(code_lines)
_re_copy_warning = re.compile(r"^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)")
_re_replace_pattern = re.compile(r"^\s*(\S+)->(\S+)(\s+.*|$)")
_re_fill_pattern = re.compile(r"<FILL\s+[^>]*>")
def get_indent(code: str) -> str:
    lines = code.split("\n")
    idx = 0
    while idx < len(lines) and len(lines[idx]) == 0:
        idx += 1
    if idx < len(lines):
        return re.search(r"^(\s*)\S", lines[idx]).groups()[0]
    return ""
def blackify(code: str) -> str:
    """Apply black and docstring styling to `code`, preserving its indentation level."""
    has_indent = len(get_indent(code)) > 0
    if has_indent:
        code = f"class Bla:\n{code}"
    mode = black.Mode(target_versions={black.TargetVersion.PY37}, line_length=119, preview=True)
    result = black.format_str(code, mode=mode)
    result, _ = style_docstrings_in_code(result)
    return result[len("class Bla:\n") :] if has_indent else result
def is_copy_consistent(filename: str, overwrite: bool = False):
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    diffs = []
    line_index = 0
    # Not a for loop cause `lines` is going to change (if `overwrite=True`).
    while line_index < len(lines):
        search = _re_copy_warning.search(lines[line_index])
        if search is None:
            line_index += 1
            continue
        # There is some copied code here, let's retrieve the original.
        indent, object_name, replace_pattern = search.groups()
        theoretical_code = find_code_in_diffusers(object_name)
        theoretical_indent = get_indent(theoretical_code)
        start_index = line_index + 1 if indent == theoretical_indent else line_index + 2
        indent = theoretical_indent
        line_index = start_index
        # Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
        should_continue = True
        while line_index < len(lines) and should_continue:
            line_index += 1
            if line_index >= len(lines):
                break
            line = lines[line_index]
            should_continue = _should_continue(line, indent) and re.search(f"^{indent}# End copy", line) is None
        # Clean up empty lines at the end (if any).
        while len(lines[line_index - 1]) <= 1:
            line_index -= 1
        observed_code_lines = lines[start_index:line_index]
        observed_code = "".join(observed_code_lines)
        # Remove any nested `Copied from` comments to avoid circular copies
        theoretical_code = [line for line in theoretical_code.split("\n") if _re_copy_warning.search(line) is None]
        theoretical_code = "\n".join(theoretical_code)
        # Before comparing, use the `replace_pattern` on the original code.
        if len(replace_pattern) > 0:
            patterns = replace_pattern.replace("with", "").split(",")
            patterns = [_re_replace_pattern.search(p) for p in patterns]
            for pattern in patterns:
                if pattern is None:
                    continue
                obj1, obj2, option = pattern.groups()
                theoretical_code = re.sub(obj1, obj2, theoretical_code)
                if option.strip() == "all-casing":
                    theoretical_code = re.sub(obj1.lower(), obj2.lower(), theoretical_code)
                    theoretical_code = re.sub(obj1.upper(), obj2.upper(), theoretical_code)
            # Blackify after replacement. To be able to do that, we need the header (class or function definition)
            # from the previous line
            theoretical_code = blackify(lines[start_index - 1] + theoretical_code)
            theoretical_code = theoretical_code[len(lines[start_index - 1]) :]
        # Test for a diff and act accordingly.
        if observed_code != theoretical_code:
            diffs.append([object_name, start_index])
            if overwrite:
                lines = lines[:start_index] + [theoretical_code] + lines[line_index:]
                line_index = start_index + 1
    if overwrite and len(diffs) > 0:
        # Warn the user a file has been modified.
        print(f"Detected changes, rewriting {filename}.")
        with open(filename, "w", encoding="utf-8", newline="\n") as f:
            f.writelines(lines)
    return diffs
def check_copies(overwrite: bool = False) -> None:
    all_files = glob.glob(os.path.join(DIFFUSERS_PATH, "**/*.py"), recursive=True)
    diffs = []
    for filename in all_files:
        new_diffs = is_copy_consistent(filename, overwrite)
        diffs += [f"- {filename}: copy does not match {d[0]} at line {d[1]}" for d in new_diffs]
    if not overwrite and len(diffs) > 0:
        diff = "\n".join(diffs)
        raise Exception(
            "Found the following copy inconsistencies:\n"
            + diff
            + "\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.")
if __name__ == "__main__":
__UpperCAmelCase : str = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__UpperCAmelCase : Tuple = parser.parse_args()
check_copies(args.fix_and_overwrite)
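# A minimal sketch of the comment format the regexes above match
# (class and module names below are hypothetical):
#
#   # Copied from diffusers.models.attention.BasicTransformerBlock with BasicTransformerBlock->MyBlock
#   class MyBlock(nn.Module):
#       ...
#
# The optional "with Old->New" suffix rewrites names in the copied reference
# before comparison; appending "all-casing" also rewrites lower- and
# upper-cased variants, as handled in is_copy_consistent above.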
| 352
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
EN_CODE = 250_004
RO_CODE = 250_020
@require_sentencepiece
@require_tokenizers
class MBartaaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MBartaaTokenizer
    rust_tokenizer_class = MBartaaTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        token = "<s>"
        token_id = 0
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1_054)
    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1_054)
    def test_full_tokenizer(self):
        tokenizer = MBartaaTokenizer(SAMPLE_VOCAB, src_lang="en_XX", tgt_lang="ro_RO", keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", "."], )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids, [
                value + tokenizer.fairseq_offset
                for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
            ], )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens, [SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", "."], )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
# fmt: off
__snake_case: List[str] = {"""input_ids""": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def UpperCAmelCase__ ( self : Union[str, Any] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__snake_case: Any = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case: Optional[int] = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: Union[str, Any] = self.tokenizer_class.from_pretrained(A , **A )
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Tuple = tokenizer_r.save_pretrained(A )
__snake_case: Optional[int] = tokenizer_p.save_pretrained(A )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__snake_case: Dict = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: Tuple = tokenizer_r.from_pretrained(A )
__snake_case: Optional[Any] = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
__snake_case: Tuple = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: List[str] = tokenizer_p.save_pretrained(A )
# Checks it save with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: List[Any] = tokenizer_r.from_pretrained(A )
__snake_case: Dict = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: Dict = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__snake_case: Any = tokenizer_r.from_pretrained(A )
__snake_case: Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class MBartaaOneToManyIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/mbart-large-50-one-to-many-mmt"
    src_text = [
        " UN Chief Says There Is No Military Solution in Syria",
        """ Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
    ]
    tgt_text = [
        """Şeful ONU declară că nu există o soluţie militară în Siria""",
        """Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
        """ pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
        """ face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
    ]
    expected_src_tokens = [EN_CODE, 8_274, 127_873, 25_916, 7, 8_622, 2_071, 438, 67_485, 53, 187_895, 23, 51_712, 2]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="en_XX", tgt_lang="ro_RO")
        cls.pad_token_id = 1
        return cls
    def test_language_codes(self):
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ar_AR"], 250_001)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["en_EN"], 250_004)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ro_RO"], 250_020)
        self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["mr_IN"], 250_038)
    def test_tokenizer_batch_encode_plus(self):
        ids = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
        self.assertListEqual(self.expected_src_tokens, ids)
    def test_tokenizer_decode_ignores_language_codes(self):
        self.assertIn(RO_CODE, self.tokenizer.all_special_ids)
        generated_ids = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
        result = self.tokenizer.decode(generated_ids, skip_special_tokens=True)
        expected_romanian = self.tokenizer.decode(generated_ids[1:], skip_special_tokens=True)
        self.assertEqual(result, expected_romanian)
        self.assertNotIn(self.tokenizer.eos_token, result)
    def test_tokenizer_truncation(self):
        src_text = ["this is gunna be a long sentence " * 20]
        assert isinstance(src_text[0], str)
        desired_max_length = 10
        ids = self.tokenizer(src_text, max_length=desired_max_length, truncation=True).input_ids[0]
        self.assertEqual(ids[0], EN_CODE)
        self.assertEqual(ids[-1], 2)
        self.assertEqual(len(ids), desired_max_length)
    def test_mask_token(self):
        self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]), [250_053, 250_001])
    def test_special_tokens_unaffected_by_save_load(self):
        tmpdirname = tempfile.mkdtemp()
        original_special_tokens = self.tokenizer.fairseq_tokens_to_ids
        self.tokenizer.save_pretrained(tmpdirname)
        new_tok = MBartaaTokenizer.from_pretrained(tmpdirname)
        self.assertDictEqual(new_tok.fairseq_tokens_to_ids, original_special_tokens)
    @require_torch
    def test_batch_fairseq_parity(self):
        batch = self.tokenizer(self.src_text, text_target=self.tgt_text, padding=True, return_tensors="pt")
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        # fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
        assert batch.input_ids[1][0] == EN_CODE
        assert batch.input_ids[1][-1] == 2
        assert batch.labels[1][0] == RO_CODE
        assert batch.labels[1][-1] == 2
        assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
    @require_torch
    def test_tokenizer_prepare_batch(self):
        batch = self.tokenizer(
            self.src_text, text_target=self.tgt_text, padding=True, truncation=True, max_length=len(self.expected_src_tokens), return_tensors="pt", )
        batch["decoder_input_ids"] = shift_tokens_right(batch["labels"], self.tokenizer.pad_token_id)
        self.assertIsInstance(batch, BatchEncoding)
        self.assertEqual((2, 14), batch.input_ids.shape)
        self.assertEqual((2, 14), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(self.expected_src_tokens, result)
        self.assertEqual(2, batch.decoder_input_ids[0, 0])  # decoder_start_token_id
        # Test that special tokens are reset
        self.assertEqual(self.tokenizer.prefix_tokens, [EN_CODE])
        self.assertEqual(self.tokenizer.suffix_tokens, [self.tokenizer.eos_token_id])
    def test_seq2seq_max_target_length(self):
        batch = self.tokenizer(self.src_text, padding=True, truncation=True, max_length=3, return_tensors="pt")
        targets = self.tokenizer(
            text_target=self.tgt_text, padding=True, truncation=True, max_length=10, return_tensors="pt")
        labels = targets["input_ids"]
        batch["decoder_input_ids"] = shift_tokens_right(labels, self.tokenizer.pad_token_id)
        self.assertEqual(batch.input_ids.shape[1], 3)
        self.assertEqual(batch.decoder_input_ids.shape[1], 10)
    @require_torch
    def test_tokenizer_translation(self):
        inputs = self.tokenizer._build_translation_inputs(
            "A test", return_tensors="pt", src_lang="en_XX", tgt_lang="ar_AR")
        self.assertEqual(
            nested_simplify(inputs),
            {
                # en_XX, A, test, EOS
                "input_ids": [[250_004, 62, 3_034, 2]],
                "attention_mask": [[1, 1, 1, 1]],
                # ar_AR
                "forced_bos_token_id": 250_001,
            }, )
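# A minimal usage sketch mirroring the integration tests above (texts are
# illustrative): source sentences are prefixed with the src_lang code and
# suffixed with </s> (id 2).
#
#   tokenizer = MBartaaTokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO")
#   batch = tokenizer("UN Chief says there is no military solution in Syria",
#                     text_target="Şeful ONU declară că nu există o soluţie militară în Siria",
#                     return_tensors="pt")
#   # batch.input_ids[0][0] == EN_CODE and batch.input_ids[0][-1] == 2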
| 293
| 0
|
from copy import deepcopy
from typing import Optional, Union
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_tf_available, is_torch_available
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
class SamProcessor(ProcessorMixin):
    """Wraps a SamImageProcessor and rescales point/box prompts into the resized image frame."""
    attributes = ["image_processor"]
    image_processor_class = "SamImageProcessor"
    def __init__(self, image_processor):
        super().__init__(image_processor)
        self.current_processor = self.image_processor
        self.point_pad_value = -10
        self.target_size = self.image_processor.size["longest_edge"]
    def __call__(self, images=None, input_points=None, input_labels=None, input_boxes=None, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs, ):
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors, **kwargs, )
        # pop arguments that are not used in the forward but used nevertheless
        original_sizes = encoding_image_processor["original_sizes"]
        if hasattr(original_sizes, "numpy"):  # Checks if Torch or TF tensor
            original_sizes = original_sizes.numpy()
        input_points, input_labels, input_boxes = self._check_and_preprocess_points(
            input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, )
        encoding_image_processor = self._normalize_and_convert(
            encoding_image_processor, original_sizes, input_points=input_points, input_labels=input_labels, input_boxes=input_boxes, return_tensors=return_tensors, )
        return encoding_image_processor
    def _normalize_and_convert(self, encoding_image_processor, original_sizes, input_points=None, input_labels=None, input_boxes=None, return_tensors="pt", ):
        if input_points is not None:
            if len(original_sizes) != len(input_points):
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_sizes[0]) for point in input_points
                ]
            else:
                input_points = [
                    self._normalize_coordinates(self.target_size, point, original_size)
                    for point, original_size in zip(input_points, original_sizes)
                ]
            # check that all arrays have the same shape
            if not all(point.shape == input_points[0].shape for point in input_points):
                if input_labels is not None:
                    input_points, input_labels = self._pad_points_and_labels(input_points, input_labels)
            input_points = np.array(input_points)
        if input_labels is not None:
            input_labels = np.array(input_labels)
        if input_boxes is not None:
            if len(original_sizes) != len(input_boxes):
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_sizes[0], is_bounding_box=True)
                    for box in input_boxes
                ]
            else:
                input_boxes = [
                    self._normalize_coordinates(self.target_size, box, original_size, is_bounding_box=True)
                    for box, original_size in zip(input_boxes, original_sizes)
                ]
            input_boxes = np.array(input_boxes)
        if input_boxes is not None:
            if return_tensors == "pt":
                input_boxes = torch.from_numpy(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = input_boxes.unsqueeze(1) if len(input_boxes.shape) != 3 else input_boxes
            elif return_tensors == "tf":
                input_boxes = tf.convert_to_tensor(input_boxes)
                # boxes batch size of 1 by default
                input_boxes = tf.expand_dims(input_boxes, 1) if len(input_boxes.shape) != 3 else input_boxes
            encoding_image_processor.update({"input_boxes": input_boxes})
        if input_points is not None:
            if return_tensors == "pt":
                input_points = torch.from_numpy(input_points)
                # point batch size of 1 by default
                input_points = input_points.unsqueeze(1) if len(input_points.shape) != 4 else input_points
            elif return_tensors == "tf":
                input_points = tf.convert_to_tensor(input_points)
                # point batch size of 1 by default
                input_points = tf.expand_dims(input_points, 1) if len(input_points.shape) != 4 else input_points
            encoding_image_processor.update({"input_points": input_points})
        if input_labels is not None:
            if return_tensors == "pt":
                input_labels = torch.from_numpy(input_labels)
                # point batch size of 1 by default
                input_labels = input_labels.unsqueeze(1) if len(input_labels.shape) != 3 else input_labels
            elif return_tensors == "tf":
                input_labels = tf.convert_to_tensor(input_labels)
                # point batch size of 1 by default
                input_labels = tf.expand_dims(input_labels, 1) if len(input_labels.shape) != 3 else input_labels
            encoding_image_processor.update({"input_labels": input_labels})
        return encoding_image_processor
    def _pad_points_and_labels(self, input_points, input_labels):
        """Pad all point lists to the largest one and mark the padding with `point_pad_value`."""
        expected_nb_points = max([point.shape[0] for point in input_points])
        processed_input_points = []
        for i, point in enumerate(input_points):
            if point.shape[0] != expected_nb_points:
                point = np.concatenate(
                    [point, np.zeros((expected_nb_points - point.shape[0], 2)) + self.point_pad_value], axis=0)
                input_labels[i] = np.append(input_labels[i], [self.point_pad_value])
            processed_input_points.append(point)
        input_points = processed_input_points
        return input_points, input_labels
    def _normalize_coordinates(self, target_size, coords: np.ndarray, original_size, is_bounding_box=False):
        old_h, old_w = original_size
        new_h, new_w = self.image_processor._get_preprocess_shape(original_size, longest_edge=target_size)
        coords = deepcopy(coords).astype(float)
        if is_bounding_box:
            coords = coords.reshape(-1, 2, 2)
        coords[..., 0] = coords[..., 0] * (new_w / old_w)
        coords[..., 1] = coords[..., 1] * (new_h / old_h)
        if is_bounding_box:
            coords = coords.reshape(-1, 4)
        return coords
    def _check_and_preprocess_points(self, input_points=None, input_labels=None, input_boxes=None, ):
        if input_points is not None:
            if hasattr(input_points, "numpy"):  # Checks for TF or Torch tensor
                input_points = input_points.numpy().tolist()
            if not isinstance(input_points, list) or not isinstance(input_points[0], list):
                raise ValueError("Input points must be a list of list of floating points.")
            input_points = [np.array(input_point) for input_point in input_points]
        else:
            input_points = None
        if input_labels is not None:
            if hasattr(input_labels, "numpy"):
                input_labels = input_labels.numpy().tolist()
            if not isinstance(input_labels, list) or not isinstance(input_labels[0], list):
                raise ValueError("Input labels must be a list of list integers.")
            input_labels = [np.array(label) for label in input_labels]
        else:
            input_labels = None
        if input_boxes is not None:
            if hasattr(input_boxes, "numpy"):
                input_boxes = input_boxes.numpy().tolist()
            if (
                not isinstance(input_boxes, list)
                or not isinstance(input_boxes[0], list)
                or not isinstance(input_boxes[0][0], list)
            ):
                raise ValueError("Input boxes must be a list of list of list of floating points.")
            input_boxes = [np.array(box).astype(np.float32) for box in input_boxes]
        else:
            input_boxes = None
        return input_points, input_labels, input_boxes
    @property
    def model_input_names(self):
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(image_processor_input_names))
    def post_process_masks(self, *args, **kwargs):
        return self.image_processor.post_process_masks(*args, **kwargs)
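# A minimal usage sketch (hypothetical checkpoint and image): the processor
# resizes the image and rescales prompt coordinates into the resized frame.
#
#   processor = SamProcessor.from_pretrained("facebook/sam-vit-base")
#   inputs = processor(raw_image, input_points=[[[450, 600]]], return_tensors="pt")
#   # the point batch dimension is inserted above, giving shape (1, 1, 1, 2)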
| 353
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor(FeatureExtractionMixin):
    """Base feature extractor for speech models, providing padding and truncation utilities."""
    def __init__(self, feature_size: int, sampling_rate: int, padding_value: float, **kwargs):
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop("padding_side", "right")
        self.return_attention_mask = kwargs.pop("return_attention_mask", True)
        super().__init__(**kwargs)
    def pad(
        self,
        processed_features: Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ],
        padding: Union[bool, str, PaddingStrategy] = True,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
    ) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(A , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__snake_case: Optional[int] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f''' to this method that includes {self.model_input_names[0]}, but you provided'''
f''' {list(processed_features.keys() )}''' )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
__snake_case: Any = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(A ) == 0:
if return_attention_mask:
__snake_case: Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__snake_case: int = required_input[0]
if isinstance(A , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__snake_case: Optional[int] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(A ):
__snake_case: Optional[int] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(A ):
__snake_case: str = """tf"""
elif is_torch_tensor(A ):
__snake_case: str = """pt"""
elif isinstance(A , (int, float, list, tuple, np.ndarray) ):
__snake_case: List[str] = """np"""
else:
raise ValueError(
f'''type of {first_element} unknown: {type(A )}. '''
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__snake_case: List[Any] = to_numpy(A )
else:
__snake_case: Union[str, Any] = [to_numpy(A ) for v in value]
# Convert padding_strategy in PaddingStrategy
__snake_case: Union[str, Any] = self._get_padding_strategies(padding=A , max_length=A )
__snake_case: Any = processed_features[self.model_input_names[0]]
__snake_case: int = len(A )
if not all(len(A ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__snake_case: Union[str, Any] = []
for i in range(A ):
__snake_case: List[Any] = {k: v[i] for k, v in processed_features.items()}
# truncation
__snake_case: Tuple = self._truncate(
A , max_length=A , pad_to_multiple_of=A , truncation=A , )
truncated_inputs.append(A )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__snake_case: Optional[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__snake_case: List[str] = PaddingStrategy.MAX_LENGTH
__snake_case: List[Any] = {}
for i in range(A ):
# padding
__snake_case: Any = self._pad(
truncated_inputs[i] , max_length=A , padding_strategy=A , pad_to_multiple_of=A , return_attention_mask=A , )
for key, value in outputs.items():
if key not in batch_outputs:
__snake_case: Optional[Any] = []
if value.dtype is np.dtype(np.floataa ):
__snake_case: str = value.astype(np.floataa )
batch_outputs[key].append(A )
return BatchFeature(A , tensor_type=A )
    def _pad(self, processed_features: Union[Dict[str, np.ndarray], BatchFeature], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None, ) -> dict:
__snake_case: List[Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__snake_case: List[str] = len(A )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__snake_case: List[str] = np.ones(len(A ) , dtype=np.intaa )
if needs_to_be_padded:
__snake_case: Any = max_length - len(A )
if self.padding_side == "right":
if return_attention_mask:
__snake_case: Optional[int] = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__snake_case: Any = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__snake_case: Union[str, Any] = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__snake_case: Dict = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__snake_case: Union[str, Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__snake_case: str = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
    def _truncate(self, processed_features: Union[Dict[str, np.ndarray], BatchFeature], max_length: Optional[int] = None, pad_to_multiple_of: Optional[int] = None, truncation: Optional[bool] = None, ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Tuple = len(A ) > max_length
if needs_to_be_truncated:
__snake_case: List[Any] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__snake_case: int = processed_features["""attention_mask"""][:max_length]
return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
# Get padding strategy
if padding is not False:
if padding is True:
__snake_case: Optional[int] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(A , A ):
__snake_case: Optional[int] = PaddingStrategy(A )
elif isinstance(A , A ):
__snake_case: Any = padding
else:
__snake_case: Any = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
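# A minimal usage sketch of the collate path above (hypothetical subclass and
# sizes): pad() takes a list of per-example dicts, pads the main input to the
# longest sequence, and returns a batched BatchFeature with an attention mask.
#
#   features = [{"input_values": np.zeros(800)}, {"input_values": np.zeros(1_000)}]
#   batch = feature_extractor.pad(features, padding=True, return_tensors="np")
#   assert batch["input_values"].shape == (2, 1_000)
#   assert batch["attention_mask"].sum(-1).tolist() == [800, 1_000]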
| 293
| 0
|
from jiwer import compute_measures
import datasets
_CITATION = "\\n@inproceedings{inproceedings,\n    author = {Morris, Andrew and Maier, Viktoria and Green, Phil},\n    year = {2004},\n    month = {01},\n    pages = {},\n    title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}\n}\n"
_DESCRIPTION = "\\nWord error rate (WER) is a common metric of the performance of an automatic speech recognition system.\n\nThe general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.\n\nThis problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.\n\nWord error rate can then be computed as:\n\nWER = (S + D + I) / N = (S + D + I) / (S + D + C)\n\nwhere\n\nS is the number of substitutions,\nD is the number of deletions,\nI is the number of insertions,\nC is the number of correct words,\nN is the number of words in the reference (N=S+D+C).\n\nThis value indicates the average number of errors per reference word. The lower the value, the better the\nperformance of the ASR system with a WER of 0 being a perfect score.\n"
_KWARGS_DESCRIPTION = "\nCompute WER score of transcribed segments against references.\n\nArgs:\n    references: List of references for each speech input.\n    predictions: List of transcriptions to score.\n    concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.\n\nReturns:\n    (float): the word error rate\n\nExamples:\n\n    >>> predictions = [\"this is the prediction\", \"there is an other sample\"]\n    >>> references = [\"this is the reference\", \"there is another one\"]\n    >>> wer = datasets.load_metric(\"wer\")\n    >>> wer_score = wer.compute(predictions=predictions, references=references)\n    >>> print(wer_score)\n    0.5\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/jitsi/jiwer/"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/Word_error_rate""",
] , )
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)["wer"]
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures["substitutions"] + measures["deletions"] + measures["insertions"]
                total += measures["substitutions"] + measures["deletions"] + measures["hits"]
            return incorrect / total
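# A minimal usage sketch: with the default concatenate_texts=False, raw error
# counts are pooled across pairs, i.e. WER = (S + D + I) / (S + D + H).
if __name__ == "__main__":
    metric = WER()
    # "prediction" vs "reference" is one substitution out of four words -> 0.25
    print(metric.compute(predictions=["this is the prediction"], references=["this is the reference"]))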
| 354
|
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    # Element-wise max(0, x): negative entries are clamped to zero.
    return np.maximum(0, vector)
if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0 0 5]
| 293
| 0
|
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class MTaIntegrationTest(unittest.TestCase):
    @slow
    def test_small_integration_test(self):
        model = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small")
        input_ids = tokenizer("Hello there", return_tensors="pt").input_ids
        labels = tokenizer("Hi I am", return_tensors="pt").input_ids
        loss = model(input_ids.to(torch_device), labels=labels.to(torch_device)).loss
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE) < 1e-4)
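# Note on the check above: `loss` is the cross-entropy averaged over the
# labels.shape[-1] target tokens, so -(num_tokens * loss) recovers the summed
# sequence log-likelihood that the expected Mesh-TensorFlow score refers to.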
| 355
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class GenerationConfigTest(unittest.TestCase):
    @parameterized.expand([(None,), ("foo.json",)])
    def test_save_load_config(self, config_name):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir, config_name=config_name)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, config_name=config_name)
        # Checks parameters that were specified
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.temperature, 0.7)
        self.assertEqual(loaded_config.length_penalty, 1.0)
        self.assertEqual(loaded_config.bad_words_ids, [[1, 2, 3], [4, 5]])
        # Checks parameters that were not specified (defaults)
        self.assertEqual(loaded_config.top_k, 50)
        self.assertEqual(loaded_config.max_length, 20)
        self.assertEqual(loaded_config.max_time, None)
    def test_from_model_config(self):
        model_config = AutoConfig.from_pretrained("gpt2")
        generation_config_from_model = GenerationConfig.from_model_config(model_config)
        default_generation_config = GenerationConfig()
        # The generation config has loaded a few non-default parameters from the model config
        self.assertNotEqual(generation_config_from_model, default_generation_config)
        # One of those parameters is eos_token_id -- check if it matches
        self.assertNotEqual(generation_config_from_model.eos_token_id, default_generation_config.eos_token_id)
        self.assertEqual(generation_config_from_model.eos_token_id, model_config.eos_token_id)
    def test_update(self):
        generation_config = GenerationConfig()
        update_kwargs = {
            "max_new_tokens": 1_024,
            "foo": "bar",
        }
        update_kwargs_copy = copy.deepcopy(update_kwargs)
        unused_kwargs = generation_config.update(**update_kwargs)
        # update_kwargs was not modified (no side effects)
        self.assertEqual(update_kwargs, update_kwargs_copy)
        # update_kwargs was used to update the config on valid attributes
        self.assertEqual(generation_config.max_new_tokens, 1_024)
        # `.update()` returns a dictionary of unused kwargs
        self.assertEqual(unused_kwargs, {"foo": "bar"})
    def test_initialize_new_kwargs(self):
        generation_config = GenerationConfig()
        generation_config.foo = "bar"
        with tempfile.TemporaryDirectory("test-generation-config") as tmp_dir:
            generation_config.save_pretrained(tmp_dir)
            new_config = GenerationConfig.from_pretrained(tmp_dir)
        # the attribute set above survives the save/load round trip
        self.assertEqual(new_config.foo, "bar")
        generation_config = GenerationConfig.from_model_config(new_config)
        assert not hasattr(generation_config, "foo")  # no new kwargs should be initialized if from config
    def test_kwarg_init(self):
        default_config = GenerationConfig()
        self.assertEqual(default_config.temperature, 1.0)
        self.assertEqual(default_config.do_sample, False)
        self.assertEqual(default_config.num_beams, 1)
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, bad_words_ids=[[1, 2, 3], [4, 5]], )
        self.assertEqual(config.temperature, 0.7)
        self.assertEqual(config.do_sample, True)
        self.assertEqual(config.num_beams, 1)
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(tmp_dir)
            loaded_config = GenerationConfig.from_pretrained(tmp_dir, temperature=1.0)
        self.assertEqual(loaded_config.temperature, 1.0)
        self.assertEqual(loaded_config.do_sample, True)
        self.assertEqual(loaded_config.num_beams, 1)  # default value
@is_staging_test
class ConfigPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
    @classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
    def test_push_to_hub(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub("test-generation-config", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="test-generation-config")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="test-generation-config", push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained(f"{USER}/test-generation-config")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
    def test_push_to_hub_in_organization(self):
        config = GenerationConfig(
            do_sample=True, temperature=0.7, length_penalty=1.0, )
        config.push_to_hub("valid_org/test-generation-config-org", use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-generation-config-org")
        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            config.save_pretrained(
                tmp_dir, repo_id="valid_org/test-generation-config-org", push_to_hub=True, use_auth_token=self._token)
        new_config = GenerationConfig.from_pretrained("valid_org/test-generation-config-org")
        for k, v in config.to_dict().items():
            if k != "transformers_version":
                self.assertEqual(v, getattr(new_config, k))
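# A minimal round-trip sketch mirroring the tests above (hypothetical tmp_dir):
#   config = GenerationConfig(do_sample=True, temperature=0.7)
#   config.save_pretrained(tmp_dir)                       # writes generation_config.json
#   loaded = GenerationConfig.from_pretrained(tmp_dir)    # same non-default values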
| 293
| 0
|
def binary_insertion_sort(collection: list) -> list:
    n = len(collection)
    for i in range(1, n):
        val = collection[i]
        low = 0
        high = i - 1
        # Binary search for the insertion point of `val` in the sorted prefix.
        while low <= high:
            mid = (low + high) // 2
            if val < collection[mid]:
                high = mid - 1
            else:
                low = mid + 1
        # Shift elements right to make room, then insert.
        for j in range(i, low, -1):
            collection[j] = collection[j - 1]
        collection[low] = val
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(binary_insertion_sort(unsorted))
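# A worked example: binary search finds each insertion point in O(log i)
# comparisons, but the shifting loop still moves O(i) elements, so the sort
# costs O(n log n) comparisons and O(n^2) moves overall.
#   binary_insertion_sort([5, 2, 4, 1])  # -> [1, 2, 4, 5]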
| 356
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__UpperCAmelCase : Tuple = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args) -> None:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts)
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config)
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights)
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student, args) -> None:
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student, args) -> None:
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main() -> None:
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)")
    parser.add_argument(
        "--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.", )
    parser.add_argument(
        "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student type (DistilBERT, RoBERTa).", )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint.")
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa).")
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0.")
    parser.add_argument(
        "--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.", )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0.")
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.")
    parser.add_argument(
        "--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.", )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).", )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask", action="store_true", help="If true, compute the distillation loss only the [MLM] prediction distribution.", )
    parser.add_argument(
        "--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.", )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=SCREAMING_SNAKE_CASE__ , default=3 , help="""Number of pass on the whole dataset.""")
parser.add_argument("""--batch_size""" , type=SCREAMING_SNAKE_CASE__ , default=5 , help="""Batch size (for each process).""")
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=SCREAMING_SNAKE_CASE__ , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=SCREAMING_SNAKE_CASE__ , help="""Linear warmup proportion.""")
parser.add_argument("""--weight_decay""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Weight decay if we apply some.""")
parser.add_argument("""--learning_rate""" , default=5e-4 , type=SCREAMING_SNAKE_CASE__ , help="""The initial learning rate for Adam.""")
parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=SCREAMING_SNAKE_CASE__ , help="""Epsilon for Adam optimizer.""")
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=SCREAMING_SNAKE_CASE__ , help="""Max gradient norm.""")
parser.add_argument("""--initializer_range""" , default=0.02 , type=SCREAMING_SNAKE_CASE__ , help="""Random initialization range.""")
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=SCREAMING_SNAKE_CASE__ , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="""Number of GPUs in the node.""")
parser.add_argument("""--local_rank""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="""Distributed training - Local rank""")
parser.add_argument("""--seed""" , type=SCREAMING_SNAKE_CASE__ , default=56 , help="""Random seed""")
parser.add_argument("""--log_interval""" , type=SCREAMING_SNAKE_CASE__ , default=500 , help="""Tensorboard logging interval.""")
parser.add_argument("""--checkpoint_interval""" , type=SCREAMING_SNAKE_CASE__ , default=4000 , help="""Checkpoint interval.""")
__snake_case: List[Any] = parser.parse_args()
sanity_checks(SCREAMING_SNAKE_CASE__)
# ARGS #
init_gpu_params(SCREAMING_SNAKE_CASE__)
set_seed(SCREAMING_SNAKE_CASE__)
if args.is_master:
if os.path.exists(args.dump_path):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'''
""" itUse `--force` if you want to overwrite it""")
else:
shutil.rmtree(args.dump_path)
if not os.path.exists(args.dump_path):
os.makedirs(args.dump_path)
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''')
# SAVE PARAMS #
logger.info(F'''Param: {args}''')
with open(os.path.join(args.dump_path , """parameters.json""") , """w""") as f:
json.dump(vars(SCREAMING_SNAKE_CASE__) , SCREAMING_SNAKE_CASE__ , indent=4)
git_log(args.dump_path)
__snake_case , __snake_case , __snake_case: str = MODEL_CLASSES[args.student_type]
__snake_case , __snake_case , __snake_case: Union[str, Any] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__snake_case: Tuple = teacher_tokenizer_class.from_pretrained(args.teacher_name)
__snake_case: str = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__snake_case: List[str] = tokenizer.all_special_tokens.index(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''')
__snake_case: Optional[Any] = special_tok_ids
__snake_case: List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file , """rb""") as fp:
__snake_case: int = pickle.load(SCREAMING_SNAKE_CASE__)
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''')
with open(args.token_counts , """rb""") as fp:
__snake_case: List[str] = pickle.load(SCREAMING_SNAKE_CASE__)
__snake_case: Dict = np.maximum(SCREAMING_SNAKE_CASE__ , 1) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__snake_case: Union[str, Any] = 0.0 # do not predict special tokens
__snake_case: Any = torch.from_numpy(SCREAMING_SNAKE_CASE__)
else:
__snake_case: Any = None
__snake_case: Union[str, Any] = LmSeqsDataset(params=SCREAMING_SNAKE_CASE__ , data=SCREAMING_SNAKE_CASE__)
logger.info("""Data loader created.""")
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''')
__snake_case: Tuple = student_config_class.from_pretrained(args.student_config)
__snake_case: List[str] = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''')
__snake_case: Optional[int] = student_model_class.from_pretrained(args.student_pretrained_weights , config=SCREAMING_SNAKE_CASE__)
else:
__snake_case: Union[str, Any] = student_model_class(SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''')
logger.info("""Student loaded.""")
# TEACHER #
__snake_case: Optional[int] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''')
logger.info(F'''Teacher loaded from {args.teacher_name}.''')
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if args.freeze_token_type_embds:
freeze_token_type_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__snake_case: List[str] = Distiller(
params=SCREAMING_SNAKE_CASE__ , dataset=SCREAMING_SNAKE_CASE__ , token_probs=SCREAMING_SNAKE_CASE__ , student=SCREAMING_SNAKE_CASE__ , teacher=SCREAMING_SNAKE_CASE__)
distiller.train()
logger.info("""Let's go get some drinks.""")
if __name__ == "__main__":
main()
| 293
| 0
|
from __future__ import annotations

from random import choice


def random_pivot(lst):
    """Choose a random pivot element from the list."""
    return choice(lst)


def kth_number(lst: list[int], k: int) -> int:
    """Return the kth smallest element of the list (1-indexed) via quickselect."""
    pivot = random_pivot(lst)

    # partition based on pivot
    # linear time
    small = [e for e in lst if e < pivot]
    big = [e for e in lst if e > pivot]

    # if we get lucky, pivot might be the element we want.
    # we can easily see this:
    # small (elements smaller than k)
    # + pivot (kth element)
    # + big (elements larger than k)
    if len(small) == k - 1:
        return pivot
    # pivot is in elements bigger than k
    elif len(small) < k - 1:
        return kth_number(big, k - len(small) - 1)
    # pivot is in elements smaller than k
    else:
        return kth_number(small, k)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
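A minimal usage sketch for the quickselect above (the sample lists are arbitrary; note the strict comparisons assume distinct elements):

assert kth_number([2, 1, 3, 4, 5], 3) == 3  # 3rd smallest
assert kth_number([10, 2, 1, 7], 1) == 1    # minimum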
| 357
|
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa: F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
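For reference, a minimal sketch of the replacement usage the warning points to (the model id, image URL, and prompt are illustrative, not mandated by the shim above):

from diffusers import StableDiffusionImg2ImgPipeline
from diffusers.utils import load_image

pipe = StableDiffusionImg2ImgPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")
init_image = load_image(
    "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
)
image = pipe(prompt="A fantasy landscape", image=init_image, strength=0.75).images[0]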
| 293
| 0
|
import numpy as np
import torch
import torch.nn as nn

from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel

from ...utils import logging


logger = logging.get_logger(__name__)


class IFSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModelWithProjection(config.vision_config)

        self.p_head = nn.Linear(config.vision_config.projection_dim, 1)
        self.w_head = nn.Linear(config.vision_config.projection_dim, 1)

    @torch.no_grad()
    def forward(self, clip_input, images, p_threshold=0.5, w_threshold=0.5):
        image_embeds = self.vision_model(clip_input)[0]

        nsfw_detected = self.p_head(image_embeds)
        nsfw_detected = nsfw_detected.flatten()
        nsfw_detected = nsfw_detected > p_threshold
        nsfw_detected = nsfw_detected.tolist()

        if any(nsfw_detected):
            logger.warning(
                "Potential NSFW content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, nsfw_detected_ in enumerate(nsfw_detected):
            if nsfw_detected_:
                images[idx] = np.zeros(images[idx].shape)

        watermark_detected = self.w_head(image_embeds)
        watermark_detected = watermark_detected.flatten()
        watermark_detected = watermark_detected > w_threshold
        watermark_detected = watermark_detected.tolist()

        if any(watermark_detected):
            logger.warning(
                "Potential watermarked content was detected in one or more images. A black image will be returned instead."
                " Try again with a different prompt and/or seed."
            )

        for idx, watermark_detected_ in enumerate(watermark_detected):
            if watermark_detected_:
                images[idx] = np.zeros(images[idx].shape)

        return images, nsfw_detected, watermark_detected
| 358
|
import argparse
from collections import defaultdict

import yaml


PATH_TO_TOC = "docs/source/en/_toctree.yml"


def clean_model_doc_toc(model_doc):
    """Collapse duplicate entries in the model doc table of content and sort it by title."""
    counts = defaultdict(int)
    for doc in model_doc:
        counts[doc["local"]] += 1
    duplicates = [key for key, value in counts.items() if value > 1]

    new_doc = []
    for duplicate_key in duplicates:
        titles = list({doc["title"] for doc in model_doc if doc["local"] == duplicate_key})
        if len(titles) > 1:
            raise ValueError(
                f"{duplicate_key} is present several times in the documentation table of content at "
                "`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the "
                "others."
            )
        # Only add this once
        new_doc.append({"local": duplicate_key, "title": titles[0]})

    # Add none duplicate-keys
    new_doc.extend([doc for doc in model_doc if counts[doc["local"]] == 1])

    # Sort
    return sorted(new_doc, key=lambda s: s["title"].lower())


def check_model_doc(overwrite=False):
    with open(PATH_TO_TOC, encoding="utf-8") as f:
        content = yaml.safe_load(f.read())

    # Get to the API doc
    api_idx = 0
    while content[api_idx]["title"] != "API":
        api_idx += 1
    api_doc = content[api_idx]["sections"]

    # Then to the model doc
    model_idx = 0
    while api_doc[model_idx]["title"] != "Models":
        model_idx += 1
    model_doc = api_doc[model_idx]["sections"]

    modalities_docs = [(idx, section) for idx, section in enumerate(model_doc) if "sections" in section]
    diff = False
    for idx, modality_doc in modalities_docs:
        old_modality_doc = modality_doc["sections"]
        new_modality_doc = clean_model_doc_toc(old_modality_doc)
        if old_modality_doc != new_modality_doc:
            diff = True
            if overwrite:
                model_doc[idx]["sections"] = new_modality_doc

    if diff:
        if overwrite:
            api_doc[model_idx]["sections"] = model_doc
            content[api_idx]["sections"] = api_doc
            with open(PATH_TO_TOC, "w", encoding="utf-8") as f:
                f.write(yaml.dump(content, allow_unicode=True))
        else:
            raise ValueError(
                "The model doc part of the table of content is not properly sorted, run `make style` to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    check_model_doc(args.fix_and_overwrite)
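A small illustration of what the cleanup function does (the entries here are made up for the example):

toc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},  # duplicate entry
]
# Duplicates collapse to a single entry and the result is sorted by title.
assert clean_model_doc_toc(toc) == [
    {"local": "model_doc/albert", "title": "ALBERT"},
    {"local": "model_doc/bert", "title": "BERT"},
]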
| 293
| 0
|
import math
from datetime import datetime, timedelta


def gauss_easter(year: int) -> datetime:
    """Calculate the Gregorian Easter date for a given year using Gauss's algorithm."""
    metonic_cycle = year % 19
    julian_leap_year = year % 4
    non_leap_year = year % 7
    leap_day_inhibits = math.floor(year / 100)
    lunar_orbit_correction = math.floor((13 + 8 * leap_day_inhibits) / 25)
    leap_day_reinstall_number = leap_day_inhibits / 4
    secular_moon_shift = (
        15 - lunar_orbit_correction + leap_day_inhibits - leap_day_reinstall_number
    ) % 30
    century_starting_point = (4 + leap_day_inhibits - leap_day_reinstall_number) % 7

    # days to be added to March 21
    days_to_add = (19 * metonic_cycle + secular_moon_shift) % 30

    # PHM -> Paschal Full Moon
    days_from_phm_to_sunday = (
        2 * julian_leap_year
        + 4 * non_leap_year
        + 6 * days_to_add
        + century_starting_point
    ) % 7

    if days_to_add == 29 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 19)
    elif days_to_add == 28 and days_from_phm_to_sunday == 6:
        return datetime(year, 4, 18)
    else:
        return datetime(year, 3, 22) + timedelta(
            days=int(days_to_add + days_from_phm_to_sunday)
        )


if __name__ == "__main__":
    for year in (1994, 2000, 2010, 2021, 2023):
        tense = "will be" if year > datetime.now().year else "was"
        print(f"Easter in {year} {tense} {gauss_easter(year)}")
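A spot-check against two well-known Gregorian Easter dates:

assert gauss_easter(2000) == datetime(2000, 4, 23)
assert gauss_easter(2021) == datetime(2021, 4, 4)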
| 359
|
from __future__ import annotations

from decimal import Decimal

from numpy import array


def inverse_of_matrix(matrix: list[list[float]]) -> list[list[float]]:
    d = Decimal

    # Check if the provided matrix has 2 rows and 2 columns
    # since this implementation only works for 2x2 matrices
    if len(matrix) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
        # Calculate the determinant of the matrix
        determinant = float(
            d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1])
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creates a copy of the matrix with swapped positions of the elements
        swapped_matrix = [[0.0, 0.0], [0.0, 0.0]]
        swapped_matrix[0][0], swapped_matrix[1][1] = matrix[1][1], matrix[0][0]
        swapped_matrix[1][0], swapped_matrix[0][1] = -matrix[1][0], -matrix[0][1]

        # Calculate the inverse of the matrix
        return [
            [(float(d(n)) / determinant) or 0.0 for n in row] for row in swapped_matrix
        ]
    elif (
        len(matrix) == 3
        and len(matrix[0]) == 3
        and len(matrix[1]) == 3
        and len(matrix[2]) == 3
    ):
        # Calculate the determinant of the matrix using Sarrus rule
        determinant = float(
            (
                (d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
                + (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
                + (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
            )
            - (
                (d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
                + (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
                + (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
            )
        )
        if determinant == 0:
            raise ValueError("This matrix has no inverse.")

        # Creating cofactor matrix
        cofactor_matrix = [
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
            [d(0.0), d(0.0), d(0.0)],
        ]
        cofactor_matrix[0][0] = (d(matrix[1][1]) * d(matrix[2][2])) - (
            d(matrix[1][2]) * d(matrix[2][1])
        )
        cofactor_matrix[0][1] = -(
            (d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
        )
        cofactor_matrix[0][2] = (d(matrix[1][0]) * d(matrix[2][1])) - (
            d(matrix[1][1]) * d(matrix[2][0])
        )
        cofactor_matrix[1][0] = -(
            (d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
        )
        cofactor_matrix[1][1] = (d(matrix[0][0]) * d(matrix[2][2])) - (
            d(matrix[0][2]) * d(matrix[2][0])
        )
        cofactor_matrix[1][2] = -(
            (d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
        )
        cofactor_matrix[2][0] = (d(matrix[0][1]) * d(matrix[1][2])) - (
            d(matrix[0][2]) * d(matrix[1][1])
        )
        cofactor_matrix[2][1] = -(
            (d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
        )
        cofactor_matrix[2][2] = (d(matrix[0][0]) * d(matrix[1][1])) - (
            d(matrix[0][1]) * d(matrix[1][0])
        )

        # Transpose the cofactor matrix (Adjoint matrix)
        adjoint_matrix = array(cofactor_matrix)
        for i in range(3):
            for j in range(3):
                adjoint_matrix[i][j] = cofactor_matrix[j][i]

        # Inverse of the matrix using the formula (1/determinant) * adjoint matrix
        inverse_matrix = array(adjoint_matrix)
        for i in range(3):
            for j in range(3):
                inverse_matrix[i][j] /= d(determinant)

        # Calculate the inverse of the matrix
        return [[float(d(n)) or 0.0 for n in row] for row in inverse_matrix]
    raise ValueError("Please provide a matrix of size 2x2 or 3x3.")
| 293
| 0
|
import argparse
import json
import re
from pathlib import Path

import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image

from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--model_name",
        default="mobilenet_v1_1.0_224",
        type=str,
        help="Name of the MobileNetV1 model you'd like to convert. Should be in the form 'mobilenet_v1_<depth>_<size>'.",
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
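An illustrative direct invocation of the converter (the checkpoint and output paths are placeholders, not real files):

convert_movilevit_checkpoint(
    model_name="mobilenet_v1_1.0_224",
    checkpoint_path="./mobilenet_v1_1.0_224.ckpt",       # hypothetical local TF checkpoint
    pytorch_dump_folder_path="./mobilenet_v1_1.0_224_pt",
    push_to_hub=False,
)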
| 360
|
import math


def proth(number: int) -> int:
    """Return the nth Proth number (1-indexed)."""
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # Number of doubling-size blocks needed to reach the nth Proth number
        block_index = int(math.log(number // 3, 2)) + 2

        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2

        return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(f"ValueError: there is no {number}th Proth number")
            continue

        print(f"The {number}th Proth number: {value}")
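A quick check against the opening of the Proth sequence (3, 5, 9, 13, 17, 25, ...):

assert [proth(n) for n in range(1, 7)] == [3, 5, 9, 13, 17, 25]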
| 293
| 0
|
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485]),
            )
        )

    def test_exact_match_kwarg(self):
        # SpeechT5 isn't deterministic
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485]),
            )
        )
| 361
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union

import torch
import torch.nn as nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder


@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: DiagonalGaussianDistribution


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            norm_num_groups=norm_num_groups,
            act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)

    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b

    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
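A minimal usage sketch of the tiling/slicing switches above (assumes the diffusers package; "stabilityai/sd-vae-ft-mse" is one public VAE checkpoint, but any AutoencoderKL weights would do):

from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
vae.enable_tiling()   # large inputs are routed through tiled_encode / tiled_decode
vae.enable_slicing()  # batched inputs are processed one sample at a time
sample = torch.randn(1, 3, 512, 512)
latents = vae.encode(sample).latent_dist.sample()
reconstruction = vae.decode(latents).sample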
| 293
| 0
|
import re
import warnings
from contextlib import contextmanager

from ...processing_utils import ProcessorMixin


class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """Convert a generated token sequence into an ordered JSON-like structure."""
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
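A small illustration of the token-to-JSON conversion (the tag names and values are made up; "naver-clova-ix/donut-base" is a public Donut checkpoint used only to get a tokenizer):

from transformers import DonutProcessor

processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")
sequence = "<s_menu><s_name>Latte</s_name><s_price>4.50</s_price></s_menu>"
print(processor.token2json(sequence))
# Expected shape of the result: {"menu": {"name": "Latte", "price": "4.50"}}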
| 362
|
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_D_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
    # See all SEW-D models at https://huggingface.co/models?filter=sew-d
}


class SEWDConfig(PretrainedConfig):
    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"
                f" but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"
                f" = {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
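The closing property multiplies the conv strides together, giving the number of input samples per output frame; with the default strides above that works out to 320:

from transformers import SEWDConfig

config = SEWDConfig()
# 5 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 * 2 * 1 = 320
assert config.inputs_to_logits_ratio == 320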
| 293
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging


if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}


class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )

        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
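A brief usage sketch (a minimal round-trip check; "bigscience/bloom-560m" is the smallest public BLOOM checkpoint):

from transformers import BloomTokenizerFast

tokenizer = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
ids = tokenizer("Hello world")["input_ids"]
assert tokenizer.decode(ids) == "Hello world"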
| 363
|
import itertools
import os
import random
import tempfile
import unittest

import numpy as np

from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available

from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin


if is_torch_available():
    import torch

if is_datasets_available():
    from datasets import load_dataset

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a nested list of random floats with the given 2D shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        spectrogram_length=2_048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44_100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate

    def prepare_feat_extract_dict(self):
        return {
            "spectrogram_length": self.spectrogram_length,
            "feature_size": self.feature_size,
            "num_audio_channels": self.num_audio_channels,
            "hop_length": self.hop_length,
            "chunk_length": self.chunk_length,
            "sampling_rate": self.sampling_rate,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs


@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))

    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)

    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)

        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44_100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values

        self.assertEquals(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 293
| 0
|
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
__UpperCAmelCase : Optional[Any] = random.Random()
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None) -> Any:
if rng is None:
__snake_case: str = global_rng
__snake_case: Dict = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
@require_torch
@require_torchaudio
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Optional[int] , A : Any , A : Dict=7 , A : Optional[Any]=400 , A : Optional[Any]=2_000 , A : Dict=24 , A : Dict=24 , A : int=0.0 , A : List[str]=16_000 , A : Optional[Any]=True , A : Union[str, Any]=True , ):
__snake_case: List[Any] = parent
__snake_case: int = batch_size
__snake_case: Dict = min_seq_length
__snake_case: int = max_seq_length
__snake_case: int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__snake_case: Optional[int] = feature_size
__snake_case: Optional[Any] = num_mel_bins
__snake_case: Dict = padding_value
__snake_case: Union[str, Any] = sampling_rate
__snake_case: List[str] = return_attention_mask
__snake_case: List[str] = do_normalize
def UpperCAmelCase__ ( self : List[Any] ):
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase__ ( self : Optional[int] , A : Union[str, Any]=False , A : List[Any]=False ):
def _flatten(A : int ):
return list(itertools.chain(*_A ) )
if equal_length:
__snake_case: Any = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__snake_case: Tuple = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__snake_case: Union[str, Any] = [np.asarray(_A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __snake_case ( snake_case__ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SpeechaTextFeatureExtractor if is_speech_available() else None
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Optional[Any] = SpeechaTextFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : Dict , A : Union[str, Any] ):
self.assertTrue(np.all(np.mean(_A , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(_A , axis=0 ) - 1 ) < 1E-3 ) )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__snake_case: Any = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__snake_case: Union[str, Any] = [np.asarray(_A ) for speech_input in speech_inputs]
# Test feature size
__snake_case: List[str] = feature_extractor(_A , padding=_A , return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
__snake_case: Optional[Any] = feature_extractor(speech_inputs[0] , return_tensors="""np""" ).input_features
__snake_case: Any = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test batched
__snake_case: List[Any] = feature_extractor(_A , return_tensors="""np""" ).input_features
__snake_case: int = feature_extractor(_A , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__snake_case: List[Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__snake_case: List[str] = np.asarray(_A )
__snake_case: List[Any] = feature_extractor(_A , return_tensors="""np""" ).input_features
__snake_case: Union[str, Any] = feature_extractor(_A , return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(_A , _A ):
self.assertTrue(np.allclose(_A , _A , atol=1E-3 ) )
def UpperCAmelCase__ ( self : Any ):
__snake_case: int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__snake_case: Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__snake_case: List[Any] = ["""longest""", """max_length""", """do_not_pad"""]
__snake_case: str = [None, 16, None]
for max_length, padding in zip(_A , _A ):
__snake_case: Optional[Any] = feature_extractor(
_A , padding=_A , max_length=_A , return_attention_mask=_A )
__snake_case: Dict = inputs.input_features
__snake_case: Tuple = inputs.attention_mask
__snake_case: List[str] = [np.sum(_A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: int = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__snake_case: int = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__snake_case: int = ["""longest""", """max_length""", """do_not_pad"""]
__snake_case: Optional[Any] = [None, 16, None]
for max_length, padding in zip(_A , _A ):
__snake_case: str = feature_extractor(
_A , max_length=_A , padding=_A , return_tensors="""np""" , return_attention_mask=_A )
__snake_case: Optional[Any] = inputs.input_features
__snake_case: Optional[Any] = inputs.attention_mask
__snake_case: Union[str, Any] = [np.sum(_A ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__snake_case: Tuple = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__snake_case: int = feature_extractor(
_A , padding="""max_length""" , max_length=4 , truncation=_A , return_tensors="""np""" , return_attention_mask=_A , )
__snake_case: Dict = inputs.input_features
__snake_case: List[Any] = inputs.attention_mask
__snake_case: Union[str, Any] = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__snake_case: List[str] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__snake_case: Dict = feature_extractor(
_A , padding="""longest""" , max_length=4 , truncation=_A , return_tensors="""np""" , return_attention_mask=_A , )
__snake_case: Optional[int] = inputs.input_features
__snake_case: int = inputs.attention_mask
__snake_case: str = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 4, 24) )
__snake_case: Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__snake_case: List[Any] = feature_extractor(
_A , padding="""longest""" , max_length=16 , truncation=_A , return_tensors="""np""" , return_attention_mask=_A , )
__snake_case: Any = inputs.input_features
__snake_case: List[Any] = inputs.attention_mask
__snake_case: Any = np.sum(attention_mask == 1 , axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape , (3, 6, 24) )
def UpperCAmelCase__ ( self : Dict ):
import torch
__snake_case: Optional[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__snake_case: Optional[int] = np.random.rand(100 , 32 ).astype(np.floataa )
__snake_case: str = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__snake_case: str = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__snake_case: Any = feature_extractor.pad([{"""input_features""": inputs}] , return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCAmelCase__ ( self : Tuple , A : Tuple ):
from datasets import load_dataset
__snake_case: List[Any] = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__snake_case: List[str] = ds.sort("""id""" ).select(range(_A ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : List[str] ):
# fmt: off
__snake_case: str = np.array([
-1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
-1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
-1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
] )
# fmt: on
__snake_case: Dict = self._load_datasamples(1 )
__snake_case: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__snake_case: List[Any] = feature_extractor(_A , return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape , (1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] , _A , atol=1E-4 ) )
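A minimal numpy sketch of the per-utterance zero-mean/unit-variance normalization that _check_zero_mean_unit_variance verifies above: statistics are computed only over the unpadded frames, and the padded tail keeps the padding value, which is why the < 1E-6 sums hold. The cmvn helper name is hypothetical.
import numpy as np
def cmvn(features, length, padding_value=0.0):
    # hypothetical helper, not the library implementation
    valid = features[:length]
    features[:length] = (valid - valid.mean(axis=0)) / np.maximum(valid.std(axis=0), 1e-10)
    features[length:] = padding_value  # leave the padded tail at the padding value
    return features
feats = cmvn(np.random.randn(16, 24).astype(np.float32), length=10)
print(np.abs(feats[:10].mean(axis=0)).max(), feats[10:].sum())  # ~0.0 and 0.0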
| 364
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : List[Any] , A : AutoencoderKL , A : CLIPTextModel , A : CLIPTokenizer , A : UNetaDConditionModel , A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , A : StableDiffusionSafetyChecker , A : CLIPImageProcessor , ):
super().__init__()
self.register_modules(
vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , )
def UpperCAmelCase__ ( self : Optional[Any] , A : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case: Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def UpperCAmelCase__ ( self : str ):
self.enable_attention_slicing(A )
@torch.no_grad()
def __call__( self : List[str] , A : Union[str, List[str]] , A : int = 512 , A : int = 512 , A : int = 50 , A : float = 7.5 , A : Optional[Union[str, List[str]]] = None , A : Optional[int] = 1 , A : float = 0.0 , A : Optional[torch.Generator] = None , A : Optional[torch.FloatTensor] = None , A : Optional[str] = "pil" , A : bool = True , A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A : int = 1 , A : Optional[torch.FloatTensor] = None , **A : Optional[Any] , ):
if isinstance(A , A ):
__snake_case: int = 1
elif isinstance(A , A ):
__snake_case: Optional[Any] = len(A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(A )}.''' )
# get prompt text embeddings
__snake_case: Tuple = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case: Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case: List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case: Dict = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__snake_case: Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case: List[Any] = text_embeddings.shape
__snake_case: Tuple = text_embeddings.repeat(1 , A , 1 )
__snake_case: Dict = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case: List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case: List[str]
if negative_prompt is None:
__snake_case: Any = [""""""]
elif type(A ) is not type(A ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='''
f''' {type(A )}.''' )
elif isinstance(A , A ):
__snake_case: List[str] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case: str = negative_prompt
__snake_case: Any = text_input_ids.shape[-1]
__snake_case: Dict = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
__snake_case: Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case: Optional[Any] = uncond_embeddings.shape[1]
__snake_case: str = uncond_embeddings.repeat(A , A , 1 )
__snake_case: List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case: Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case: Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case: List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__snake_case: Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case: Any = torch.randn(
A , generator=A , device="""cpu""" , dtype=A ).to(self.device )
__snake_case: Tuple = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
__snake_case: Dict = torch.randn(
A , generator=A , device=self.device , dtype=A )
__snake_case: Optional[int] = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents_reference.shape}, expected {latents_shape}''' )
__snake_case: Optional[int] = latents_reference.to(self.device )
__snake_case: List[str] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__snake_case: int = (latents_shape[3] - latents_shape_reference[3]) // 2
__snake_case: Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
__snake_case: int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__snake_case: Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__snake_case: List[Any] = 0 if dx < 0 else dx
__snake_case: Dict = 0 if dy < 0 else dy
__snake_case: List[str] = max(-dx , 0 )
__snake_case: int = max(-dy , 0 )
__snake_case: List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case: str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case: Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case: Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case: int = {}
if accepts_eta:
__snake_case: Optional[Any] = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
__snake_case: str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case: Dict = self.scheduler.scale_model_input(A , A )
# predict the noise residual
__snake_case: List[Any] = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case: Any = noise_pred.chunk(2 )
__snake_case: Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case: str = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
__snake_case: Optional[int] = 1 / 0.1_8215 * latents
__snake_case: List[Any] = self.vae.decode(A ).sample
__snake_case: str = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case: Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__snake_case: List[Any] = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to(
self.device )
__snake_case , __snake_case: List[str] = self.safety_checker(
images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__snake_case: Optional[int] = None
if output_type == "pil":
__snake_case: Tuple = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
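A minimal torch sketch of the classifier-free guidance update performed inside the denoising loop above: the doubled batch stacks the unconditional and conditional inputs, and the two noise predictions are recombined with the guidance weight.
import torch
def guided_noise(noise_pred, guidance_scale):
    # the batch was built as torch.cat([uncond, cond]), so chunk(2) splits it back
    noise_uncond, noise_text = noise_pred.chunk(2)
    return noise_uncond + guidance_scale * (noise_text - noise_uncond)
pred = torch.randn(2, 4, 64, 64)  # stand-in for a UNet output on the doubled batch
print(guided_noise(pred, guidance_scale=7.5).shape)  # torch.Size([1, 4, 64, 64])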
| 293
| 0
|
from typing import Optional
import pyspark
from .. import Features, NamedSplit
from ..download import DownloadMode
from ..packaged_modules.spark.spark import Spark
from .abc import AbstractDatasetReader
class __snake_case ( a__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , A : List[str] , A : List[str] = None , A : Optional[Any] = None , A : Optional[Any] = True , A : Any = None , A : Optional[int] = False , A : Tuple = None , A : str = True , A : Any = "arrow" , **A : int , ):
super().__init__(
split=SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , keep_in_memory=SCREAMING_SNAKE_CASE_ , streaming=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
__snake_case: Any = load_from_cache_file
__snake_case: Any = file_format
__snake_case: str = Spark(
df=SCREAMING_SNAKE_CASE_ , features=SCREAMING_SNAKE_CASE_ , cache_dir=SCREAMING_SNAKE_CASE_ , working_dir=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
def UpperCAmelCase__ ( self : Union[str, Any] ):
if self.streaming:
return self.builder.as_streaming_dataset(split=self.split )
__snake_case: Dict = None if self._load_from_cache_file else DownloadMode.FORCE_REDOWNLOAD
self.builder.download_and_prepare(
download_mode=SCREAMING_SNAKE_CASE_ , file_format=self._file_format , )
return self.builder.as_dataset(split=self.split )
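A hedged usage sketch of the reader above through the public Dataset.from_spark entry point (assuming datasets >= 2.11); the local SparkSession and toy DataFrame are illustrative.
from pyspark.sql import SparkSession
from datasets import Dataset
spark = SparkSession.builder.master("local[2]").getOrCreate()
df = spark.createDataFrame([{"text": "hello"}, {"text": "world"}])
ds = Dataset.from_spark(df)  # materializes the DataFrame into an Arrow-backed dataset
print(ds[0])  # {'text': 'hello'}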
| 365
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__UpperCAmelCase : Optional[int] = "\\n\n"
__UpperCAmelCase : Tuple = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
__UpperCAmelCase : Tuple = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
def UpperCAmelCase__ ( self : int , A : str , A : Optional[Any] , A : int = 16 , A : bool = True , A : Optional[int]=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu, cpu or cuda."
if device == "gpu":
__snake_case: Optional[Any] = """cuda"""
else:
__snake_case: str = """cuda""" if torch.cuda.is_available() else """cpu"""
__snake_case: Dict = AutoModelForCausalLM.from_pretrained(A )
__snake_case: List[str] = model.to(A )
__snake_case: Optional[Any] = AutoTokenizer.from_pretrained(A )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__snake_case: Dict = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(A ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__snake_case: Tuple = model.config.max_length - 1
else:
__snake_case: Optional[Any] = model.config.max_length
__snake_case: Optional[int] = tokenizer(
A , add_special_tokens=A , padding=A , truncation=A , max_length=A , return_tensors="""pt""" , return_attention_mask=A , ).to(A )
__snake_case: Tuple = encodings["""input_ids"""]
__snake_case: Any = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__snake_case: Optional[int] = []
__snake_case: Optional[int] = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0 , len(A ) , A ) ):
__snake_case: Dict = min(start_index + batch_size , len(A ) )
__snake_case: Optional[int] = encoded_texts[start_index:end_index]
__snake_case: List[Any] = attn_masks[start_index:end_index]
if add_start_token:
__snake_case: Tuple = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(A )
__snake_case: Optional[Any] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
__snake_case: Union[str, Any] = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(A ), attn_mask] , dim=1 )
__snake_case: List[str] = encoded_batch
with torch.no_grad():
__snake_case: Union[str, Any] = model(A , attention_mask=A ).logits
__snake_case: List[str] = out_logits[..., :-1, :].contiguous()
__snake_case: Optional[Any] = labels[..., 1:].contiguous()
__snake_case: Dict = attn_mask[..., 1:].contiguous()
__snake_case: Optional[Any] = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , A ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(A )}
| 293
| 0
|
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__UpperCAmelCase : str = "true"
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=82 , SCREAMING_SNAKE_CASE__=16) -> Union[str, Any]:
set_seed(42)
__snake_case: Optional[Any] = RegressionModel()
__snake_case: Dict = deepcopy(lowerCAmelCase__)
__snake_case: int = RegressionDataset(length=lowerCAmelCase__)
__snake_case: Any = DataLoader(lowerCAmelCase__ , batch_size=lowerCAmelCase__)
model.to(accelerator.device)
__snake_case , __snake_case: Any = accelerator.prepare(lowerCAmelCase__ , lowerCAmelCase__)
return model, ddp_model, dataloader
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False) -> List[Any]:
__snake_case: List[str] = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""")
__snake_case: Optional[Any] = load_dataset("""glue""" , """mrpc""" , split="""validation""")
def tokenize_function(SCREAMING_SNAKE_CASE__):
__snake_case: Tuple = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__)
return outputs
with accelerator.main_process_first():
__snake_case: Union[str, Any] = dataset.map(
lowerCAmelCase__ , batched=lowerCAmelCase__ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
__snake_case: List[Any] = tokenized_datasets.rename_column("""label""" , """labels""")
def collate_fn(SCREAMING_SNAKE_CASE__):
if use_longest:
return tokenizer.pad(lowerCAmelCase__ , padding="""longest""" , return_tensors="""pt""")
return tokenizer.pad(lowerCAmelCase__ , padding="""max_length""" , max_length=128 , return_tensors="""pt""")
return DataLoader(lowerCAmelCase__ , shuffle=lowerCAmelCase__ , collate_fn=lowerCAmelCase__ , batch_size=16)
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
__snake_case: str = Accelerator(dispatch_batches=lowerCAmelCase__ , split_batches=lowerCAmelCase__)
__snake_case: List[Any] = get_dataloader(lowerCAmelCase__ , not dispatch_batches)
__snake_case: Optional[int] = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" , return_dict=lowerCAmelCase__)
__snake_case , __snake_case: int = accelerator.prepare(lowerCAmelCase__ , lowerCAmelCase__)
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[Any]:
__snake_case: List[Any] = []
for batch in dataloader:
__snake_case , __snake_case: Union[str, Any] = batch.values()
with torch.no_grad():
__snake_case: List[str] = model(lowerCAmelCase__)
__snake_case , __snake_case: Union[str, Any] = accelerator.gather_for_metrics((logit, target))
logits_and_targets.append((logit, target))
__snake_case , __snake_case: Optional[Any] = [], []
for logit, targ in logits_and_targets:
logits.append(lowerCAmelCase__)
targs.append(lowerCAmelCase__)
__snake_case , __snake_case: Optional[int] = torch.cat(lowerCAmelCase__), torch.cat(lowerCAmelCase__)
return logits, targs
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=82 , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=16) -> Dict:
__snake_case , __snake_case , __snake_case: Optional[Any] = get_basic_setup(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
__snake_case , __snake_case: Tuple = generate_predictions(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__)
assert (
len(lowerCAmelCase__) == num_samples
), F'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCAmelCase__)}'''
def A__ ( SCREAMING_SNAKE_CASE__ = False , SCREAMING_SNAKE_CASE__ = False) -> Union[str, Any]:
__snake_case: Any = evaluate.load("""glue""" , """mrpc""")
__snake_case , __snake_case: Tuple = get_mrpc_setup(lowerCAmelCase__ , lowerCAmelCase__)
# First do baseline
__snake_case , __snake_case , __snake_case: Any = setup["""no"""]
model.to(lowerCAmelCase__)
model.eval()
for batch in dataloader:
batch.to(lowerCAmelCase__)
with torch.inference_mode():
__snake_case: Any = model(**lowerCAmelCase__)
__snake_case: Dict = outputs.logits.argmax(dim=-1)
metric.add_batch(predictions=lowerCAmelCase__ , references=batch["""labels"""])
__snake_case: Dict = metric.compute()
# Then do distributed
__snake_case , __snake_case , __snake_case: List[Any] = setup["""ddp"""]
model.eval()
for batch in dataloader:
with torch.inference_mode():
__snake_case: Union[str, Any] = model(**lowerCAmelCase__)
__snake_case: str = outputs.logits.argmax(dim=-1)
__snake_case: str = batch["""labels"""]
__snake_case , __snake_case: List[str] = accelerator.gather_for_metrics((preds, references))
metric.add_batch(predictions=lowerCAmelCase__ , references=lowerCAmelCase__)
__snake_case: Any = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key]), F'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def A__ ( ) -> Optional[int]:
__snake_case: List[str] = Accelerator(split_batches=lowerCAmelCase__ , dispatch_batches=lowerCAmelCase__)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""")
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''')
test_mrpc(lowerCAmelCase__ , lowerCAmelCase__)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""")
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
__snake_case: int = Accelerator(split_batches=lowerCAmelCase__ , dispatch_batches=lowerCAmelCase__)
if accelerator.is_local_main_process:
print(F'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''')
test_torch_metrics(lowerCAmelCase__ , 99)
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""")
__snake_case: Union[str, Any] = Accelerator()
test_torch_metrics(lowerCAmelCase__ , 512)
accelerator.state._reset_state()
def A__ ( SCREAMING_SNAKE_CASE__) -> int:
main()
if __name__ == "__main__":
main()
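A minimal plain-Python sketch of what gather_for_metrics handles in the tests above: with N processes the sampler pads the dataset to a multiple of N, so gathered outputs must be truncated back to the true length. The numbers mirror the default num_samples of 82.
num_processes, dataset_len = 4, 82
padded_len = -(-dataset_len // num_processes) * num_processes  # ceil division -> 84
gathered = list(range(padded_len))     # stand-in for predictions gathered from all ranks
predictions = gathered[:dataset_len]   # drop the duplicated tail samples
print(len(gathered), len(predictions))  # 84 82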
| 366
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase : List[str] = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Tuple = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[int] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[Any] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
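A minimal self-contained sketch of the lazy-import pattern this module relies on: attribute access on the module object triggers the real import, so heavy optional backends load only on demand. The demo module name and stdlib mapping are illustrative, not the transformers implementation.
import importlib
import sys
import types
class _LazyDemo(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> module that actually defines it (illustrative mapping)
        self._modules = {a: m for m, attrs in import_structure.items() for a in attrs}
    def __getattr__(self, name):
        module = importlib.import_module(self._modules[name])
        return getattr(module, name)
sys.modules["lazy_demo"] = _LazyDemo("lazy_demo", {"json": ["dumps"], "math": ["sqrt"]})
import lazy_demo
print(lazy_demo.sqrt(9.0))  # math is only imported here, on first attribute access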
| 293
| 0
|
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class __snake_case ( UpperCAmelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = 42
@flax_register_to_config
class __snake_case ( nn.Module , UpperCAmelCase_ , UpperCAmelCase_ ):
'''simple docstring'''
lowerCAmelCase__ = 32
lowerCAmelCase__ = 4
lowerCAmelCase__ = 4
lowerCAmelCase__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCAmelCase__ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
lowerCAmelCase__ = False
lowerCAmelCase__ = (3_20, 6_40, 12_80, 12_80)
lowerCAmelCase__ = 2
lowerCAmelCase__ = 8
lowerCAmelCase__ = None
lowerCAmelCase__ = 12_80
lowerCAmelCase__ = 0.0
lowerCAmelCase__ = False
lowerCAmelCase__ = jnp.floataa
lowerCAmelCase__ = True
lowerCAmelCase__ = 0
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : List[Any] , A : int ):
# init input tensors
__snake_case: int = (1, self.in_channels, self.sample_size, self.sample_size)
__snake_case: Tuple = jnp.zeros(__lowercase , dtype=jnp.floataa )
__snake_case: int = jnp.ones((1,) , dtype=jnp.intaa )
__snake_case: Tuple = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__snake_case: Optional[Any] = jax.random.split(__lowercase )
__snake_case: List[str] = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(__lowercase , __lowercase , __lowercase , __lowercase )["params"]
def UpperCAmelCase__ ( self : str ):
__snake_case: List[str] = self.block_out_channels
__snake_case: Dict = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"""At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__snake_case: str = self.num_attention_heads or self.attention_head_dim
# input
__snake_case: int = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__snake_case: Union[str, Any] = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__snake_case: List[str] = FlaxTimestepEmbedding(__lowercase , dtype=self.dtype )
__snake_case: str = self.only_cross_attention
if isinstance(__lowercase , __lowercase ):
__snake_case: Tuple = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__lowercase , __lowercase ):
__snake_case: Any = (num_attention_heads,) * len(self.down_block_types )
# down
__snake_case: List[Any] = []
__snake_case: Dict = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__snake_case: Dict = output_channel
__snake_case: int = block_out_channels[i]
__snake_case: List[Any] = i == len(__lowercase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__snake_case: Tuple = FlaxCrossAttnDownBlockaD(
in_channels=__lowercase , out_channels=__lowercase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__snake_case: List[str] = FlaxDownBlockaD(
in_channels=__lowercase , out_channels=__lowercase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__lowercase )
__snake_case: List[str] = down_blocks
# mid
__snake_case: str = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__snake_case: Tuple = []
__snake_case: List[Any] = list(reversed(__lowercase ) )
__snake_case: int = list(reversed(__lowercase ) )
__snake_case: Tuple = list(reversed(__lowercase ) )
__snake_case: List[Any] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__snake_case: Any = output_channel
__snake_case: Any = reversed_block_out_channels[i]
__snake_case: Tuple = reversed_block_out_channels[min(i + 1 , len(__lowercase ) - 1 )]
__snake_case: Any = i == len(__lowercase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__snake_case: Tuple = FlaxCrossAttnUpBlockaD(
in_channels=__lowercase , out_channels=__lowercase , prev_output_channel=__lowercase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__snake_case: int = FlaxUpBlockaD(
in_channels=__lowercase , out_channels=__lowercase , prev_output_channel=__lowercase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(__lowercase )
__snake_case: Optional[int] = output_channel
__snake_case: Optional[int] = up_blocks
# out
__snake_case: str = nn.GroupNorm(num_groups=32 , epsilon=1E-5 )
__snake_case: Tuple = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : str , A : Tuple , A : Dict , A : Union[str, Any] , A : str=None , A : int=None , A : Optional[int] = True , A : Optional[Any] = False , ):
# 1. time
if not isinstance(__lowercase , jnp.ndarray ):
__snake_case: List[Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__lowercase , jnp.ndarray ) and len(timesteps.shape ) == 0:
__snake_case: List[str] = timesteps.astype(dtype=jnp.floataa )
__snake_case: Any = jnp.expand_dims(__lowercase , 0 )
__snake_case: Dict = self.time_proj(__lowercase )
__snake_case: List[str] = self.time_embedding(__lowercase )
# 2. pre-process
__snake_case: Optional[Any] = jnp.transpose(__lowercase , (0, 2, 3, 1) )
__snake_case: Optional[int] = self.conv_in(__lowercase )
# 3. down
__snake_case: int = (sample,)
for down_block in self.down_blocks:
if isinstance(__lowercase , __lowercase ):
__snake_case: str = down_block(__lowercase , __lowercase , __lowercase , deterministic=not train )
else:
__snake_case: List[Any] = down_block(__lowercase , __lowercase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__snake_case: Optional[int] = ()
for down_block_res_sample, down_block_additional_residual in zip(
__lowercase , __lowercase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__snake_case: int = new_down_block_res_samples
# 4. mid
__snake_case: List[str] = self.mid_block(__lowercase , __lowercase , __lowercase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__snake_case: Optional[int] = down_block_res_samples[-(self.layers_per_block + 1) :]
__snake_case: Union[str, Any] = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__lowercase , __lowercase ):
__snake_case: List[Any] = up_block(
__lowercase , temb=__lowercase , encoder_hidden_states=__lowercase , res_hidden_states_tuple=__lowercase , deterministic=not train , )
else:
__snake_case: List[str] = up_block(__lowercase , temb=__lowercase , res_hidden_states_tuple=__lowercase , deterministic=not train )
# 6. post-process
__snake_case: int = self.conv_norm_out(__lowercase )
__snake_case: Optional[Any] = nn.silu(__lowercase )
__snake_case: Tuple = self.conv_out(__lowercase )
__snake_case: Optional[int] = jnp.transpose(__lowercase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=__lowercase )
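A minimal plain-Python sketch of the residual bookkeeping in __call__ above: down blocks append their outputs to a running tuple, and each up block consumes the last layers_per_block + 1 entries from the end.
layers_per_block, num_up_blocks = 2, 4
res_samples = [f"down_{i}" for i in range(num_up_blocks * (layers_per_block + 1))]
for i in range(num_up_blocks):
    take = res_samples[-(layers_per_block + 1):]      # residuals fed to up block i
    res_samples = res_samples[: -(layers_per_block + 1)]
    print(i, take)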
| 367
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(A , """neck_hidden_sizes""" ) )
self.parent.assertTrue(hasattr(A , """num_attention_heads""" ) )
class __snake_case :
'''simple docstring'''
def __init__( self : int , A : str , A : Dict=13 , A : str=32 , A : Any=2 , A : Optional[Any]=3 , A : str=640 , A : Tuple=4 , A : Dict="silu" , A : List[Any]=3 , A : Any=32 , A : Any=0.1 , A : int=0.1 , A : Dict=0.1 , A : Optional[Any]=0.02 , A : List[Any]=True , A : Tuple=True , A : Any=10 , A : Optional[int]=None , ):
__snake_case: List[Any] = parent
__snake_case: Dict = batch_size
__snake_case: int = image_size
__snake_case: Tuple = patch_size
__snake_case: Tuple = num_channels
__snake_case: str = last_hidden_size
__snake_case: Dict = num_attention_heads
__snake_case: Dict = hidden_act
__snake_case: Tuple = conv_kernel_size
__snake_case: List[str] = output_stride
__snake_case: List[str] = hidden_dropout_prob
__snake_case: Optional[Any] = attention_probs_dropout_prob
__snake_case: int = classifier_dropout_prob
__snake_case: List[Any] = use_labels
__snake_case: Union[str, Any] = is_training
__snake_case: Union[str, Any] = num_labels
__snake_case: str = initializer_range
__snake_case: List[Any] = scope
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case: Tuple = None
__snake_case: Any = None
if self.use_labels:
__snake_case: Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case: str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case: Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase__ ( self : int ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : str , A : Optional[Any] , A : Any , A : Any , A : Union[str, Any] ):
__snake_case: List[Any] = MobileViTModel(config=A )
model.to(A )
model.eval()
__snake_case: int = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : str , A : List[Any] , A : Any , A : Any , A : int ):
__snake_case: str = self.num_labels
__snake_case: Optional[int] = MobileViTForImageClassification(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] , A : str , A : Optional[Any] , A : int , A : str ):
__snake_case: List[Any] = self.num_labels
__snake_case: Dict = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case: Tuple = model(A , labels=A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Tuple = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case: Any = config_and_inputs
__snake_case: Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[Any] = MobileViTModelTester(self )
__snake_case: str = MobileViTConfigTester(self , config_class=A , has_text_modality=A )
def UpperCAmelCase__ ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : List[Any] ):
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def UpperCAmelCase__ ( self : Dict ):
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def UpperCAmelCase__ ( self : Optional[Any] ):
pass
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case: Optional[Any] = model_class(A )
__snake_case: int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case: Optional[int] = [*signature.parameters.keys()]
__snake_case: List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase__ ( self : Dict ):
def check_hidden_states_output(A : List[Any] , A : int , A : Tuple ):
__snake_case: List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
__snake_case: str = model(**self._prepare_for_class(A , A ) )
__snake_case: Optional[int] = outputs.hidden_states
__snake_case: Any = 5
self.assertEqual(len(A ) , A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case: Union[str, Any] = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case: Optional[Any] = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case: Dict = True
check_hidden_states_output(A , A , A )
def UpperCAmelCase__ ( self : int ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case: List[Any] = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def A__ ( ) -> Optional[int]:
__snake_case: Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase__ ( self : Dict ):
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Tuple = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(A )
__snake_case: str = self.default_image_processor
__snake_case: Optional[Any] = prepare_img()
__snake_case: List[Any] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: Dict = model(**A )
# verify the logits
__snake_case: List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , A )
__snake_case: Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[str] = model.to(A )
__snake_case: Dict = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[Any] = prepare_img()
__snake_case: List[str] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: List[Any] = model(**A )
__snake_case: Optional[int] = outputs.logits
# verify the logits
__snake_case: Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , A )
__snake_case: Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Dict ):
__snake_case: int = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: str = model.to(A )
__snake_case: Optional[Any] = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[str] = prepare_img()
__snake_case: Optional[int] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: Dict = model(**A )
__snake_case: List[Any] = outputs.logits.detach().cpu()
__snake_case: List[str] = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(50, 60)] )
__snake_case: str = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , A )
__snake_case: int = image_processor.post_process_semantic_segmentation(outputs=A )
__snake_case: Tuple = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , A )
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Tuple:
__snake_case: Any = original_name.split(""".""")[0]
__snake_case: Optional[int] = key.split(""".""")
__snake_case: List[Any] = int(key_list[key_list.index(snake_case_) - 2])
__snake_case: Optional[int] = int(key_list[key_list.index(snake_case_) - 1])
__snake_case: str = orig_block_num - offset
__snake_case: Tuple = key.replace(F'''{orig_block_num}.{layer_num}.{original_name}''' , F'''block.{new_block_num}.{layer_num}.{new_name}''')
return key
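# A quick sanity sketch of the renaming above (hypothetical key, offset of 1,
# name mapping taken from the call sites further below):
#     replace_key_with_offset("2.1.mlp.fc1.weight", 1, "mlp.fc1", "output.conv1")
#     # -> "block.1.1.output.conv1.weight"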
def A__ ( SCREAMING_SNAKE_CASE__) -> str:
__snake_case: Union[str, Any] = OrderedDict()
__snake_case , __snake_case: Tuple = 0, 0
for key, value in state_dict.items():
if key.startswith("""network"""):
__snake_case: List[Any] = key.replace("""network""" , """poolformer.encoder""")
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("""bias""") and "patch_embed" not in key:
patch_emb_offset += 1
__snake_case: List[str] = key[: key.find("""proj""")]
__snake_case: Optional[Any] = key.replace(snake_case_ , F'''patch_embeddings.{total_embed_found}.''')
__snake_case: Union[str, Any] = key.replace("""proj""" , """projection""")
if key.endswith("""bias"""):
total_embed_found += 1
if "patch_embeddings" in key:
__snake_case: Any = """poolformer.encoder.""" + key
if "mlp.fc1" in key:
__snake_case: Tuple = replace_key_with_offset(snake_case_ , snake_case_ , """mlp.fc1""" , """output.conv1""")
if "mlp.fc2" in key:
__snake_case: Dict = replace_key_with_offset(snake_case_ , snake_case_ , """mlp.fc2""" , """output.conv2""")
if "norm1" in key:
__snake_case: Optional[Any] = replace_key_with_offset(snake_case_ , snake_case_ , """norm1""" , """before_norm""")
if "norm2" in key:
__snake_case: Any = replace_key_with_offset(snake_case_ , snake_case_ , """norm2""" , """after_norm""")
if "layer_scale_1" in key:
__snake_case: Tuple = replace_key_with_offset(snake_case_ , snake_case_ , """layer_scale_1""" , """layer_scale_1""")
if "layer_scale_2" in key:
__snake_case: List[str] = replace_key_with_offset(snake_case_ , snake_case_ , """layer_scale_2""" , """layer_scale_2""")
if "head" in key:
__snake_case: Any = key.replace("""head""" , """classifier""")
__snake_case: List[Any] = value
return new_state_dict
def A__ ( ) -> Any:
__snake_case: Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__snake_case: Union[str, Any] = Image.open(requests.get(snake_case_ , stream=snake_case_).raw)
return image
@torch.no_grad()
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Optional[int]:
__snake_case: Dict = PoolFormerConfig()
# set attributes based on model_name
__snake_case: str = """huggingface/label-files"""
__snake_case: Dict = model_name[-3:]
__snake_case: Optional[int] = 1000
__snake_case: Dict = """imagenet-1k-id2label.json"""
__snake_case: Optional[Any] = (1, 1000)
# set config attributes
__snake_case: Optional[int] = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="""dataset""") , """r"""))
    __snake_case: str = {int(k): v for k, v in idalabel.items()}
__snake_case: List[str] = idalabel
__snake_case: Optional[int] = {v: k for k, v in idalabel.items()}
if size == "s12":
__snake_case: List[Any] = [2, 2, 6, 2]
__snake_case: int = [64, 128, 320, 512]
__snake_case: Dict = 4.0
__snake_case: Any = 0.9
elif size == "s24":
__snake_case: Optional[Any] = [4, 4, 12, 4]
__snake_case: Optional[Any] = [64, 128, 320, 512]
__snake_case: Dict = 4.0
__snake_case: List[str] = 0.9
elif size == "s36":
__snake_case: Optional[Any] = [6, 6, 18, 6]
__snake_case: List[Any] = [64, 128, 320, 512]
__snake_case: int = 4.0
__snake_case: List[Any] = 1e-6
__snake_case: Any = 0.9
elif size == "m36":
__snake_case: str = [6, 6, 18, 6]
__snake_case: str = [96, 192, 384, 768]
__snake_case: Any = 4.0
__snake_case: Tuple = 1e-6
__snake_case: Union[str, Any] = 0.95
elif size == "m48":
__snake_case: List[str] = [8, 8, 24, 8]
__snake_case: List[Any] = [96, 192, 384, 768]
__snake_case: str = 4.0
__snake_case: List[Any] = 1e-6
__snake_case: List[Any] = 0.95
else:
raise ValueError(F'''Size {size} not supported''')
# load image processor
__snake_case: int = PoolFormerImageProcessor(crop_pct=snake_case_)
# Prepare image
__snake_case: Optional[Any] = prepare_img()
__snake_case: Any = image_processor(images=snake_case_ , return_tensors="""pt""").pixel_values
logger.info(F'''Converting model {model_name}...''')
# load original state dict
__snake_case: List[Any] = torch.load(snake_case_ , map_location=torch.device("""cpu"""))
# rename keys
__snake_case: Optional[int] = rename_keys(snake_case_)
# create HuggingFace model and load state dict
__snake_case: List[str] = PoolFormerForImageClassification(snake_case_)
model.load_state_dict(snake_case_)
model.eval()
# Define image processor
__snake_case: Any = PoolFormerImageProcessor(crop_pct=snake_case_)
__snake_case: List[str] = image_processor(images=prepare_img() , return_tensors="""pt""").pixel_values
# forward pass
__snake_case: int = model(snake_case_)
__snake_case: Optional[Any] = outputs.logits
# define expected logit slices for different models
if size == "s12":
__snake_case: List[str] = torch.tensor([-0.3_045, -0.6_758, -0.4_869])
elif size == "s24":
__snake_case: Any = torch.tensor([0.4_402, -0.1_374, -0.8_045])
elif size == "s36":
__snake_case: List[Any] = torch.tensor([-0.6_080, -0.5_133, -0.5_898])
elif size == "m36":
__snake_case: Optional[int] = torch.tensor([0.3_952, 0.2_263, -1.2_668])
elif size == "m48":
__snake_case: Union[str, Any] = torch.tensor([0.1_167, -0.0_656, -0.3_423])
else:
raise ValueError(F'''Size {size} not supported''')
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , snake_case_ , atol=1e-2)
# finally, save model and image processor
logger.info(F'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''')
Path(snake_case_).mkdir(exist_ok=snake_case_)
model.save_pretrained(snake_case_)
print(F'''Saving image processor to {pytorch_dump_folder_path}''')
image_processor.save_pretrained(snake_case_)
if __name__ == "__main__":
__UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="poolformer_s12",
type=str,
help="Name of the model you\'d like to convert.",
)
parser.add_argument(
"--checkpoint_path", default=None, type=str, help="Path to the original PyTorch checkpoint (.pth file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
__UpperCAmelCase : Optional[int] = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
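# Example invocation of this conversion script (hypothetical file name and paths):
#     python convert_poolformer_checkpoint.py --model_name poolformer_s12 \
#         --checkpoint_path ./poolformer_s12.pth --pytorch_dump_folder_path ./poolformer_s12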
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: str = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetDownsampleBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Union[str, Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: Union[str, Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : List[str] ):
__snake_case , __snake_case: List[str] = super().prepare_init_args_and_inputs_for_common()
__snake_case: List[Any] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : int ):
__snake_case , __snake_case: Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Any ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[Any] ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : int ):
__snake_case: str = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: str = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[str] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaD # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
__snake_case: List[str] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Tuple = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: int = super().prepare_init_args_and_inputs_for_common()
__snake_case: int = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetUpsampleBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[int] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[Any] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A , include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Optional[Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : str ):
__snake_case: Union[str, Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Any = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : int ):
__snake_case: Any = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(A )
import absl # noqa: F401 # Here to have a nice missing dependency error message early on
import nltk # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import six # noqa: F401 # Here to have a nice missing dependency error message early on
from rouge_score import rouge_scorer, scoring
import datasets
__UpperCAmelCase : Any = "\\n@inproceedings{lin-2004-rouge,\n title = \"{ROUGE}: A Package for Automatic Evaluation of Summaries\",\n author = \"Lin, Chin-Yew\",\n booktitle = \"Text Summarization Branches Out\",\n month = jul,\n year = \"2004\",\n address = \"Barcelona, Spain\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W04-1013\",\n pages = \"74--81\",\n}\n"
__UpperCAmelCase : Union[str, Any] = "\\nROUGE, or Recall-Oriented Understudy for Gisting Evaluation, is a set of metrics and a software package used for\nevaluating automatic summarization and machine translation software in natural language processing.\nThe metrics compare an automatically produced summary or translation against a reference or a set of references (human-produced) summary or translation.\n\nNote that ROUGE is case insensitive, meaning that upper case letters are treated the same way as lower case letters.\n\nThis metrics is a wrapper around Google Research reimplementation of ROUGE:\nhttps://github.com/google-research/google-research/tree/master/rouge\n"
__UpperCAmelCase : Any = "\nCalculates average rouge scores for a list of hypotheses and references\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n rouge_types: A list of rouge types to calculate.\n Valid names:\n `\"rouge{n}\"` (e.g. `\"rouge1\"`, `\"rouge2\"`) where: {n} is the n-gram based scoring,\n `\"rougeL\"`: Longest common subsequence based scoring.\n `\"rougeLSum\"`: rougeLsum splits text using `\"\n\"`.\n See details in https://github.com/huggingface/datasets/issues/617\n use_stemmer: Bool indicating whether Porter stemmer should be used to strip word suffixes.\n use_aggregator: Return aggregates if this is set to True\nReturns:\n rouge1: rouge_1 (precision, recall, f1),\n rouge2: rouge_2 (precision, recall, f1),\n rougeL: rouge_l (precision, recall, f1),\n rougeLsum: rouge_lsum (precision, recall, f1)\nExamples:\n\n >>> rouge = datasets.load_metric(\'rouge\')\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> results = rouge.compute(predictions=predictions, references=references)\n >>> print(list(results.keys()))\n [\'rouge1\', \'rouge2\', \'rougeL\', \'rougeLsum\']\n >>> print(results[\"rouge1\"])\n AggregateScore(low=Score(precision=1.0, recall=1.0, fmeasure=1.0), mid=Score(precision=1.0, recall=1.0, fmeasure=1.0), high=Score(precision=1.0, recall=1.0, fmeasure=1.0))\n >>> print(results[\"rouge1\"].mid.fmeasure)\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/google-research/tree/master/rouge"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/ROUGE_(metric)""",
"""https://github.com/google-research/google-research/tree/master/rouge""",
] , )
def UpperCAmelCase__ ( self : List[Any] , A : Optional[Any] , A : Tuple , A : List[Any]=None , A : Optional[int]=True , A : Optional[int]=False ):
if rouge_types is None:
            __snake_case: List[Any] = ["""rouge1""", """rouge2""", """rougeL""", """rougeLsum"""]
__snake_case: List[Any] = rouge_scorer.RougeScorer(rouge_types=_UpperCAmelCase , use_stemmer=_UpperCAmelCase )
if use_aggregator:
__snake_case: Optional[int] = scoring.BootstrapAggregator()
else:
__snake_case: List[str] = []
for ref, pred in zip(_UpperCAmelCase , _UpperCAmelCase ):
__snake_case: List[str] = scorer.score(_UpperCAmelCase , _UpperCAmelCase )
if use_aggregator:
aggregator.add_scores(_UpperCAmelCase )
else:
scores.append(_UpperCAmelCase )
if use_aggregator:
__snake_case: Tuple = aggregator.aggregate()
else:
__snake_case: List[Any] = {}
for key in scores[0]:
__snake_case: str = [score[key] for score in scores]
return result
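# Minimal usage sketch (mirrors the docstring above; values are illustrative):
#     rouge = datasets.load_metric("rouge")
#     results = rouge.compute(predictions=["hello there"], references=["hello there"])
#     results["rouge1"].mid.fmeasure  # -> 1.0 for an exact match
# With use_aggregator=False, results[<rouge_type>] is instead a list of
# per-example Score tuples, as built in the else-branch above.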
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase__ ( self : Dict ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = ort.SessionOptions()
__snake_case: List[Any] = False
return options
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__snake_case: Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__snake_case: List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
__snake_case: int = """A red cat sitting on a park bench"""
__snake_case: Any = np.random.RandomState(0 )
__snake_case: Optional[Any] = pipe(
prompt=A , image=A , mask_image=A , guidance_scale=7.5 , num_inference_steps=10 , generator=A , output_type="""np""" , )
__snake_case: List[Any] = output.images
__snake_case: str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__snake_case: Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__snake_case: Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__snake_case: Optional[int] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
__snake_case: List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=A , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
__snake_case: Optional[int] = """A red cat sitting on a park bench"""
__snake_case: Dict = np.random.RandomState(0 )
__snake_case: Optional[Any] = pipe(
prompt=A , image=A , mask_image=A , guidance_scale=7.5 , num_inference_steps=20 , generator=A , output_type="""np""" , )
__snake_case: List[str] = output.images
__snake_case: str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__snake_case: Union[str, Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Any:
# Load checkpoint
__snake_case: Tuple = torch.load(__SCREAMING_SNAKE_CASE , map_location="""cpu""")
__snake_case: int = chkpt["model"]
# We have the base model one level deeper than the original XLM repository
__snake_case: Optional[int] = {}
for k, v in state_dict.items():
if "pred_layer" in k:
__snake_case: Any = v
else:
__snake_case: List[str] = v
__snake_case: Optional[Any] = chkpt["params"]
__snake_case: Union[str, Any] = {n: v for n, v in config.items() if not isinstance(__SCREAMING_SNAKE_CASE , (torch.FloatTensor, numpy.ndarray))}
__snake_case: Optional[Any] = chkpt["dico_word2id"]
__snake_case: Optional[int] = {s + "</w>" if s.find("""@@""") == -1 and i > 13 else s.replace("""@@""" , """"""): i for s, i in vocab.items()}
# Save pytorch-model
__snake_case: Union[str, Any] = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
__snake_case: List[Any] = pytorch_dump_folder_path + "/" + CONFIG_NAME
__snake_case: Dict = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
print(F'''Save PyTorch model to {pytorch_weights_dump_path}''')
torch.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE)
print(F'''Save configuration file to {pytorch_config_dump_path}''')
with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(__SCREAMING_SNAKE_CASE , indent=2) + """\n""")
    print(F'''Save vocab file to {pytorch_vocab_dump_path}''')
with open(__SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""") as f:
f.write(json.dumps(__SCREAMING_SNAKE_CASE , indent=2) + """\n""")
if __name__ == "__main__":
__UpperCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__UpperCAmelCase : List[str] = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
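# Example invocation (hypothetical file name and paths; the checkpoint is an
# original XLM dump containing "model", "params" and "dico_word2id" entries):
#     python convert_xlm_checkpoint.py \
#         --xlm_checkpoint_path ./xlm_checkpoint.pth --pytorch_dump_folder_path ./xlm_converted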
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def A__ ( SCREAMING_SNAKE_CASE__ = 3) -> qiskit.result.counts.Counts:
    if isinstance(SCREAMING_SNAKE_CASE__ , str):
        raise TypeError("""number of qubits must be an integer.""")
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""")
    if math.floor(SCREAMING_SNAKE_CASE__) != number_of_qubits:
        raise ValueError("""number of qubits must be an exact integer.""")
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate (>10).""")
__snake_case: int = QuantumRegister(SCREAMING_SNAKE_CASE__ , """qr""")
__snake_case: List[str] = ClassicalRegister(SCREAMING_SNAKE_CASE__ , """cr""")
__snake_case: Optional[Any] = QuantumCircuit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
__snake_case: Tuple = number_of_qubits
for i in range(SCREAMING_SNAKE_CASE__):
quantum_circuit.h(number_of_qubits - i - 1)
counter -= 1
for j in range(SCREAMING_SNAKE_CASE__):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
for k in range(number_of_qubits // 2):
quantum_circuit.swap(SCREAMING_SNAKE_CASE__ , number_of_qubits - k - 1)
# measure all the qubits
quantum_circuit.measure(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# simulate with 10000 shots
__snake_case: Union[str, Any] = Aer.get_backend("""qasm_simulator""")
    __snake_case: Optional[Any] = execute(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , shots=10_000)
return job.result().get_counts(SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
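# Sanity note (expected behaviour, not part of the original script): applied to
# the all-zero initial state, the QFT yields a uniform superposition, so the
# 10_000 shots should split roughly evenly across all 2**number_of_qubits
# bitstrings (about 1_250 counts each for the 3-qubit default).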
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None) -> List[Any]:
__snake_case: Optional[int] = None
if token is not None:
__snake_case: int = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
__snake_case: int = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'''
__snake_case: Dict = requests.get(__lowerCAmelCase , headers=__lowerCAmelCase).json()
__snake_case: List[str] = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]})
__snake_case: List[str] = math.ceil((result["""total_count"""] - 100) / 100)
for i in range(__lowerCAmelCase):
__snake_case: List[Any] = requests.get(url + F'''&page={i + 2}''' , headers=__lowerCAmelCase).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]})
return job_links
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''')
return {}
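# Pagination sketch (illustrative numbers, not from the original script): with
# total_count == 250 and per_page == 100, math.ceil((250 - 100) / 100) == 2
# extra requests are issued for pages 2 and 3 on top of the initial call.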
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None) -> Optional[int]:
__snake_case: Optional[int] = None
if token is not None:
__snake_case: Union[str, Any] = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
__snake_case: int = F'''https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100'''
__snake_case: Any = requests.get(__lowerCAmelCase , headers=__lowerCAmelCase).json()
__snake_case: int = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]})
__snake_case: Any = math.ceil((result["""total_count"""] - 100) / 100)
for i in range(__lowerCAmelCase):
__snake_case: Union[str, Any] = requests.get(url + F'''&page={i + 2}''' , headers=__lowerCAmelCase).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]})
return artifacts
except Exception:
print(F'''Unknown error, could not fetch links:\n{traceback.format_exc()}''')
return {}
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[Any]:
__snake_case: Tuple = None
if token is not None:
__snake_case: int = {"""Accept""": """application/vnd.github+json""", """Authorization""": F'''Bearer {token}'''}
__snake_case: Any = requests.get(__lowerCAmelCase , headers=__lowerCAmelCase , allow_redirects=__lowerCAmelCase)
__snake_case: Dict = result.headers["""Location"""]
__snake_case: List[str] = requests.get(__lowerCAmelCase , allow_redirects=__lowerCAmelCase)
__snake_case: Tuple = os.path.join(__lowerCAmelCase , F'''{artifact_name}.zip''')
with open(__lowerCAmelCase , """wb""") as fp:
fp.write(response.content)
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None) -> Optional[int]:
__snake_case: Optional[Any] = []
__snake_case: str = []
__snake_case: Dict = None
with zipfile.ZipFile(__lowerCAmelCase) as z:
for filename in z.namelist():
if not os.path.isdir(__lowerCAmelCase):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(__lowerCAmelCase) as f:
for line in f:
__snake_case: int = line.decode("""UTF-8""").strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
__snake_case: Optional[int] = line[: line.index(""": """)]
__snake_case: Optional[Any] = line[line.index(""": """) + len(""": """) :]
errors.append([error_line, error])
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """):
# `test` is the test method that failed
__snake_case: str = line[len("""FAILED """) :]
failed_tests.append(__lowerCAmelCase)
elif filename == "job_name.txt":
__snake_case: Tuple = line
if len(__lowerCAmelCase) != len(__lowerCAmelCase):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(__lowerCAmelCase)} for `errors` '''
F'''and {len(__lowerCAmelCase)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
""" problem.""")
__snake_case: Optional[Any] = None
if job_name and job_links:
__snake_case: Tuple = job_links.get(__lowerCAmelCase , __lowerCAmelCase)
# A list with elements of the form (line of error, error, failed test)
__snake_case: str = [x + [y] + [job_link] for x, y in zip(__lowerCAmelCase , __lowerCAmelCase)]
return result
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None) -> Any:
__snake_case: Any = []
    __snake_case: List[Any] = [os.path.join(__lowerCAmelCase , p) for p in os.listdir(__lowerCAmelCase) if p.endswith(""".zip""")]
for p in paths:
errors.extend(get_errors_from_single_artifact(__lowerCAmelCase , job_links=__lowerCAmelCase))
return errors
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None) -> Dict:
__snake_case: int = Counter()
counter.update([x[1] for x in logs])
__snake_case: Any = counter.most_common()
__snake_case: int = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
__snake_case: Tuple = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    __snake_case: Optional[Any] = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True))
return r
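# Illustrative output shape (hypothetical values): an error seen twice yields
#     {"<error>": {"count": 2, "failed_tests": [(<failed test>, <error line>), ...]}}
# with entries sorted by descending count.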
def A__ ( SCREAMING_SNAKE_CASE__) -> Optional[int]:
__snake_case: Optional[int] = test.split("""::""")[0]
if test.startswith("""tests/models/"""):
__snake_case: str = test.split("""/""")[2]
else:
__snake_case: str = None
return test
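# For example (hypothetical test id, intended behaviour of the helper above):
#     get_model("tests/models/albert/test_modeling_albert.py::AlbertModelTest::test_x")
#     # -> "albert"; tests outside tests/models/ map to None.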
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None) -> Optional[int]:
__snake_case: Optional[Any] = [(x[0], x[1], get_model(x[2])) for x in logs]
__snake_case: Union[str, Any] = [x for x in logs if x[2] is not None]
__snake_case: int = {x[2] for x in logs}
__snake_case: Tuple = {}
for test in tests:
__snake_case: Tuple = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test])
__snake_case: Optional[int] = counter.most_common()
__snake_case: int = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
__snake_case: Union[str, Any] = sum(error_counts.values())
if n_errors > 0:
__snake_case: Union[str, Any] = {"""count""": n_errors, """errors""": error_counts}
    __snake_case: List[str] = dict(sorted(r.items() , key=lambda item: item[1]["count"] , reverse=True))
return r
def A__ ( SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
__snake_case: List[str] = """| no. | error | status |"""
__snake_case: str = """|-:|:-|:-|"""
__snake_case: Any = [header, sep]
for error in reduced_by_error:
__snake_case: Any = reduced_by_error[error]["""count"""]
__snake_case: Tuple = F'''| {count} | {error[:100]} | |'''
lines.append(__lowerCAmelCase)
return "\n".join(__lowerCAmelCase)
def A__ ( SCREAMING_SNAKE_CASE__) -> int:
__snake_case: Union[str, Any] = """| model | no. of errors | major error | count |"""
__snake_case: Tuple = """|-:|-:|-:|-:|"""
__snake_case: int = [header, sep]
for model in reduced_by_model:
__snake_case: Optional[Any] = reduced_by_model[model]["""count"""]
__snake_case , __snake_case: int = list(reduced_by_model[model]["""errors"""].items())[0]
__snake_case: str = F'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(__lowerCAmelCase)
return "\n".join(__lowerCAmelCase)
if __name__ == "__main__":
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
__UpperCAmelCase : Optional[int] = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
__UpperCAmelCase : Union[str, Any] = get_job_links(args.workflow_run_id, token=args.token)
__UpperCAmelCase : Optional[int] = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
__UpperCAmelCase : int = k.find(" / ")
__UpperCAmelCase : Tuple = k[index + len(" / ") :]
__UpperCAmelCase : Dict = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
__UpperCAmelCase : List[Any] = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
__UpperCAmelCase : List[Any] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
__UpperCAmelCase : Union[str, Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
__UpperCAmelCase : Optional[Any] = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
__UpperCAmelCase : List[Any] = reduce_by_error(errors)
__UpperCAmelCase : str = reduce_by_model(errors)
__UpperCAmelCase : List[Any] = make_github_table(reduced_by_error)
__UpperCAmelCase : Tuple = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int:
def update_area_of_max_square(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
__snake_case: Union[str, Any] = update_area_of_max_square(SCREAMING_SNAKE_CASE__ , col + 1)
__snake_case: Optional[Any] = update_area_of_max_square(row + 1 , col + 1)
__snake_case: Union[str, Any] = update_area_of_max_square(row + 1 , SCREAMING_SNAKE_CASE__)
if mat[row][col]:
__snake_case: Dict = 1 + min([right, diagonal, down])
__snake_case: Tuple = max(largest_square_area[0] , SCREAMING_SNAKE_CASE__)
return sub_problem_sol
else:
return 0
__snake_case: Union[str, Any] = [0]
update_area_of_max_square(0 , 0)
return largest_square_area[0]
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int:
def update_area_of_max_square_using_dp_array(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
__snake_case: Optional[int] = update_area_of_max_square_using_dp_array(SCREAMING_SNAKE_CASE__ , col + 1 , SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = update_area_of_max_square_using_dp_array(row + 1 , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if mat[row][col]:
__snake_case: Any = 1 + min([right, diagonal, down])
__snake_case: Optional[Any] = max(largest_square_area[0] , SCREAMING_SNAKE_CASE__)
__snake_case: int = sub_problem_sol
return sub_problem_sol
else:
return 0
__snake_case: Any = [0]
__snake_case: int = [[-1] * cols for _ in range(SCREAMING_SNAKE_CASE__)]
update_area_of_max_square_using_dp_array(0 , 0 , SCREAMING_SNAKE_CASE__)
return largest_square_area[0]
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int:
__snake_case: Tuple = [[0] * (cols + 1) for _ in range(rows + 1)]
__snake_case: str = 0
for row in range(rows - 1 , -1 , -1):
for col in range(cols - 1 , -1 , -1):
__snake_case: Tuple = dp_array[row][col + 1]
__snake_case: List[str] = dp_array[row + 1][col + 1]
__snake_case: Tuple = dp_array[row + 1][col]
if mat[row][col] == 1:
__snake_case: List[str] = 1 + min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = max(dp_array[row][col] , SCREAMING_SNAKE_CASE__)
else:
__snake_case: List[Any] = 0
return largest_square_area
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int:
__snake_case: Union[str, Any] = [0] * (cols + 1)
__snake_case: Union[str, Any] = [0] * (cols + 1)
__snake_case: Any = 0
for row in range(rows - 1 , -1 , -1):
for col in range(cols - 1 , -1 , -1):
__snake_case: int = current_row[col + 1]
__snake_case: Union[str, Any] = next_row[col + 1]
__snake_case: Optional[Any] = next_row[col]
if mat[row][col] == 1:
__snake_case: Optional[int] = 1 + min(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
__snake_case: int = max(current_row[col] , SCREAMING_SNAKE_CASE__)
else:
__snake_case: Tuple = 0
__snake_case: Tuple = current_row
return largest_square_area
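# Complexity sketch (an observation, not part of the original module): the
# memoised and bottom-up variants run in O(rows * cols) time; the variant above
# additionally keeps only two rows, cutting the auxiliary space from
# O(rows * cols) down to O(cols).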
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]]))
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : str , *A : Dict , A : Optional[int]=None , A : Tuple=None , **A : Optional[int] ):
super().__init__(*A , **A )
__snake_case: List[Any] = eval_examples
__snake_case: str = post_process_function
def UpperCAmelCase__ ( self : List[Any] , A : Dict=None , A : int=None , A : List[Any]=None , A : str = "eval" ):
__snake_case: int = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case: Any = self.get_eval_dataloader(A )
__snake_case: Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case: Union[str, Any] = self.compute_metrics
__snake_case: List[str] = None
__snake_case: Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__snake_case: Tuple = time.time()
try:
__snake_case: Any = eval_loop(
A , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , )
finally:
__snake_case: Optional[int] = compute_metrics
__snake_case: Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node writes the results by default
__snake_case: List[str] = self.post_process_function(A , A , output.predictions )
__snake_case: List[Any] = self.compute_metrics(A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__snake_case: str = metrics.pop(A )
metrics.update(output.metrics )
else:
__snake_case: List[Any] = output.metrics
if self.args.should_log:
            # Only the main node logs the results by default
self.log(A )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__snake_case: str = self.callback_handler.on_evaluate(self.args , self.state , self.control , A )
return metrics
def UpperCAmelCase__ ( self : Optional[Any] , A : List[Any] , A : List[str] , A : str=None , A : str = "test" ):
__snake_case: Optional[Any] = self.get_test_dataloader(A )
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case: Optional[int] = self.compute_metrics
__snake_case: List[Any] = None
__snake_case: str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__snake_case: Dict = time.time()
try:
__snake_case: str = eval_loop(
A , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , )
finally:
__snake_case: List[Any] = compute_metrics
__snake_case: Dict = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case: Union[str, Any] = self.post_process_function(A , A , output.predictions , """predict""" )
__snake_case: str = self.compute_metrics(A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__snake_case: List[str] = metrics.pop(A )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A )
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPTaLMHeadModel
__UpperCAmelCase : int = logging.getLogger(__name__)
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Tuple:
# save results
if os.path.exists(SCREAMING_SNAKE_CASE__):
if os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , """config.json""")) and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE__ , """config.json""")):
os.remove(os.path.join(SCREAMING_SNAKE_CASE__ , """config.json"""))
if os.path.exists(os.path.join(SCREAMING_SNAKE_CASE__ , """pytorch_model.bin""")) and os.path.isfile(
os.path.join(SCREAMING_SNAKE_CASE__ , """pytorch_model.bin""")):
os.remove(os.path.join(SCREAMING_SNAKE_CASE__ , """pytorch_model.bin"""))
else:
os.makedirs(SCREAMING_SNAKE_CASE__)
model.save_pretrained(SCREAMING_SNAKE_CASE__)
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False) -> Tuple:
__snake_case: Optional[int] = 2
if unlogit:
__snake_case: List[Any] = torch.pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
__snake_case: List[Any] = p * torch.log(SCREAMING_SNAKE_CASE__)
__snake_case: List[str] = 0
return -plogp.sum(dim=-1)
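# Quick sanity check (a sketch, assuming unlogit=False): a uniform attention
# row over k tokens has entropy log(k) in nats, e.g. a (1, 4) tensor filled
# with 0.25 yields ~1.3863 == log(4).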
def A__ ( SCREAMING_SNAKE_CASE__) -> List[Any]:
logger.info("""lv, h >\t""" + """\t""".join(F'''{x + 1}''' for x in range(len(SCREAMING_SNAKE_CASE__))))
for row in range(len(SCREAMING_SNAKE_CASE__)):
if tensor.dtype != torch.long:
logger.info(F'''layer {row + 1}:\t''' + """\t""".join(F'''{x:.5f}''' for x in tensor[row].cpu().data))
else:
logger.info(F'''layer {row + 1}:\t''' + """\t""".join(F'''{x:d}''' for x in tensor[row].cpu().data))
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=False) -> List[Any]:
    __snake_case , __snake_case: List[str] = model.config.num_hidden_layers, model.config.num_attention_heads
__snake_case: str = torch.zeros(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__).to(args.device)
__snake_case: int = torch.zeros(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__).to(args.device)
if head_mask is None:
__snake_case: int = torch.ones(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__).to(args.device)
head_mask.requires_grad_(requires_grad=SCREAMING_SNAKE_CASE__)
# If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
if actually_pruned:
__snake_case: Optional[int] = None
__snake_case: Dict = 0.0
__snake_case: int = 0.0
for step, inputs in enumerate(tqdm(SCREAMING_SNAKE_CASE__ , desc="""Iteration""" , disable=args.local_rank not in [-1, 0])):
__snake_case: Any = tuple(t.to(args.device) for t in inputs)
        (__snake_case ,): Dict = inputs
# Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
__snake_case: Tuple = model(SCREAMING_SNAKE_CASE__ , labels=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__)
# (loss), lm_logits, presents, (all hidden_states), (attentions)
__snake_case: List[Any] = (
outputs[0],
outputs[1],
outputs[-1],
) # Loss and logits are the first, attention the last
loss.backward() # Backpropagate to populate the gradients in the head mask
total_loss += loss.detach().cpu().numpy()
if compute_entropy:
for layer, attn in enumerate(SCREAMING_SNAKE_CASE__):
__snake_case: Dict = entropy(attn.detach() , SCREAMING_SNAKE_CASE__)
attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()
if compute_importance:
head_importance += head_mask.grad.abs().detach()
tot_tokens += torch.ones_like(SCREAMING_SNAKE_CASE__).float().detach().sum().data
# Normalize
attn_entropy /= tot_tokens
head_importance /= tot_tokens
# Layerwise importance normalization
if not args.dont_normalize_importance_by_layer:
__snake_case: Any = 2
__snake_case: Union[str, Any] = torch.pow(torch.pow(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__).sum(-1) , 1 / exponent)
head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20
if not args.dont_normalize_global_importance:
__snake_case: int = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())
# Print matrices
if compute_entropy:
logger.info("""Attention entropies""")
print_ad_tensor(SCREAMING_SNAKE_CASE__)
if compute_importance:
logger.info("""Head importance scores""")
print_ad_tensor(SCREAMING_SNAKE_CASE__)
logger.info("""Head ranked by importance scores""")
__snake_case: Dict = torch.zeros(head_importance.numel() , dtype=torch.long , device=args.device)
__snake_case: Optional[int] = torch.arange(
head_importance.numel() , device=args.device)
__snake_case: List[Any] = head_ranks.view_as(SCREAMING_SNAKE_CASE__)
print_ad_tensor(SCREAMING_SNAKE_CASE__)
return attn_entropy, head_importance, total_loss
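# Sketch of the layerwise normalization above, with illustrative tensors: each
# layer's row of head-importance scores is divided by its L2 norm (exponent 2),
# which makes the scores comparable across layers.
#
#     import torch
#     head_importance = torch.tensor([[3.0, 4.0], [6.0, 8.0]])
#     norm_by_layer = torch.pow(torch.pow(head_importance, 2).sum(-1), 1 / 2)
#     print(head_importance / (norm_by_layer.unsqueeze(-1) + 1e-20))
#     # both rows normalize to [0.6000, 0.8000]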
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> str:
__snake_case: Any = compute_heads_importance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compute_entropy=SCREAMING_SNAKE_CASE__)
    __snake_case: str = 1 / loss # instead of a downstream score, use the LM loss
logger.info("""Pruning: original score: %f, threshold: %f""" , SCREAMING_SNAKE_CASE__ , original_score * args.masking_threshold)
__snake_case: Optional[int] = torch.ones_like(SCREAMING_SNAKE_CASE__)
__snake_case: List[str] = max(1 , int(new_head_mask.numel() * args.masking_amount))
__snake_case: Optional[Any] = original_score
while current_score >= original_score * args.masking_threshold:
__snake_case: str = new_head_mask.clone().detach() # save current head mask
# heads from least important to most - keep only not-masked heads
__snake_case: List[Any] = float("""Inf""")
__snake_case: str = head_importance.view(-1).sort()[1]
if len(SCREAMING_SNAKE_CASE__) <= num_to_mask:
print("""BREAK BY num_to_mask""")
break
# mask heads
__snake_case: Tuple = current_heads_to_mask[:num_to_mask]
logger.info("""Heads to mask: %s""" , str(current_heads_to_mask.tolist()))
__snake_case: Optional[int] = new_head_mask.view(-1)
__snake_case: Optional[int] = 0.0
__snake_case: Union[str, Any] = new_head_mask.view_as(SCREAMING_SNAKE_CASE__)
__snake_case: str = new_head_mask.clone().detach()
print_ad_tensor(SCREAMING_SNAKE_CASE__)
# Compute metric and head importance again
__snake_case: Tuple = compute_heads_importance(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compute_entropy=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__)
__snake_case: List[Any] = 1 / loss
logger.info(
"""Masking: current score: %f, remaining heads %d (%.1f percents)""" , SCREAMING_SNAKE_CASE__ , new_head_mask.sum() , new_head_mask.sum() / new_head_mask.numel() * 100 , )
logger.info("""Final head mask""")
print_ad_tensor(SCREAMING_SNAKE_CASE__)
np.save(os.path.join(args.output_dir , """head_mask.npy""") , head_mask.detach().cpu().numpy())
return head_mask
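# Sketch of the head-selection step in the loop above, with illustrative
# values: sorting the flattened importance scores in ascending order yields
# the indices of the least important heads first; already-masked heads were
# set to float("Inf") beforehand, so they always sort last.
#
#     import torch
#     imp = torch.tensor([[0.9, 0.1], [0.4, 0.7]])
#     print(imp.view(-1).sort()[1])  # tensor([1, 2, 3, 0]) -> layer 0, head 1 first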
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[Any]:
__snake_case: Any = datetime.now()
__snake_case: List[Any] = compute_heads_importance(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compute_entropy=SCREAMING_SNAKE_CASE__ , compute_importance=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = 1 / loss
__snake_case: Dict = datetime.now() - before_time
__snake_case: Optional[Any] = sum(p.numel() for p in model.parameters())
__snake_case: List[Any] = {
layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(SCREAMING_SNAKE_CASE__))
}
for k, v in heads_to_prune.items():
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
__snake_case: str = [
v,
]
assert sum(len(SCREAMING_SNAKE_CASE__) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
model.prune_heads(SCREAMING_SNAKE_CASE__)
__snake_case: Union[str, Any] = sum(p.numel() for p in model.parameters())
__snake_case: Union[str, Any] = datetime.now()
__snake_case: Optional[Any] = compute_heads_importance(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , compute_entropy=SCREAMING_SNAKE_CASE__ , compute_importance=SCREAMING_SNAKE_CASE__ , head_mask=SCREAMING_SNAKE_CASE__ , actually_pruned=SCREAMING_SNAKE_CASE__ , )
__snake_case: Optional[int] = 1 / loss
__snake_case: Optional[Any] = datetime.now() - before_time
logger.info(
"""Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)""" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , pruned_num_params / original_num_params * 100 , )
logger.info("""Pruning: score with masking: %f score with pruning: %f""" , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
logger.info("""Pruning: speed ratio (original timing / new timing): %f percents""" , original_time / new_time * 100)
save_model(SCREAMING_SNAKE_CASE__ , args.output_dir)
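# Sketch of the `heads_to_prune` construction above, with an illustrative
# mask: the zero positions of the head mask become per-layer lists handed to
# model.prune_heads. A layer with a single pruned head squeezes down to a bare
# int, which is why the isinstance check re-wraps it in a list.
#
#     import torch
#     head_mask = torch.tensor([[1.0, 0.0, 1.0], [0.0, 0.0, 1.0]])
#     print({
#         layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist()
#         for layer in range(len(head_mask))
#     })  # {0: 1, 1: [0, 1]}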
def A__ ( ) -> Optional[Any]:
__snake_case: List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--data_dir""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The input data dir. Should contain the .tsv files (or other data files) for the task.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--output_dir""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The output directory where the model predictions and checkpoints will be written.""" , )
# Other parameters
parser.add_argument(
"""--config_name""" , default="""""" , type=SCREAMING_SNAKE_CASE__ , help="""Pretrained config name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--tokenizer_name""" , default="""""" , type=SCREAMING_SNAKE_CASE__ , help="""Pretrained tokenizer name or path if not the same as model_name_or_path""" , )
parser.add_argument(
"""--cache_dir""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Where do you want to store the pre-trained models downloaded from s3""" , )
parser.add_argument(
"""--data_subset""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="""If > 0: limit the data to a subset of data_subset instances.""")
parser.add_argument(
"""--overwrite_output_dir""" , action="""store_true""" , help="""Whether to overwrite data in output directory""")
parser.add_argument(
"""--overwrite_cache""" , action="""store_true""" , help="""Overwrite the cached training and evaluation sets""")
parser.add_argument(
"""--dont_normalize_importance_by_layer""" , action="""store_true""" , help="""Don't normalize importance score by layers""")
parser.add_argument(
"""--dont_normalize_global_importance""" , action="""store_true""" , help="""Don't normalize all importance scores between 0 and 1""" , )
parser.add_argument(
"""--try_masking""" , action="""store_true""" , help="""Whether to try to mask head until a threshold of accuracy.""")
parser.add_argument(
"""--masking_threshold""" , default=0.9 , type=SCREAMING_SNAKE_CASE__ , help="""masking threshold in term of metrics (stop masking when metric < threshold * original metric value).""" , )
parser.add_argument(
"""--masking_amount""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Amount to heads to masking at each masking step.""")
parser.add_argument("""--metric_name""" , default="""acc""" , type=SCREAMING_SNAKE_CASE__ , help="""Metric to use for head masking.""")
parser.add_argument(
"""--max_seq_length""" , default=128 , type=SCREAMING_SNAKE_CASE__ , help=(
"""The maximum total input sequence length after WordPiece tokenization. \n"""
"""Sequences longer than this will be truncated, sequences shorter padded."""
) , )
parser.add_argument("""--batch_size""" , default=1 , type=SCREAMING_SNAKE_CASE__ , help="""Batch size.""")
parser.add_argument("""--seed""" , type=SCREAMING_SNAKE_CASE__ , default=42)
parser.add_argument("""--local_rank""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="""local_rank for distributed training on gpus""")
parser.add_argument("""--no_cuda""" , action="""store_true""" , help="""Whether not to use CUDA when available""")
parser.add_argument("""--server_ip""" , type=SCREAMING_SNAKE_CASE__ , default="""""" , help="""Can be used for distant debugging.""")
parser.add_argument("""--server_port""" , type=SCREAMING_SNAKE_CASE__ , default="""""" , help="""Can be used for distant debugging.""")
__snake_case: Optional[Any] = parser.parse_args()
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""")
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=SCREAMING_SNAKE_CASE__)
ptvsd.wait_for_attach()
# Setup devices and distributed training
if args.local_rank == -1 or args.no_cuda:
__snake_case: Optional[int] = torch.device("""cuda""" if torch.cuda.is_available() and not args.no_cuda else """cpu""")
__snake_case: str = 0 if args.no_cuda else torch.cuda.device_count()
else:
torch.cuda.set_device(args.local_rank)
__snake_case: Union[str, Any] = torch.device("""cuda""" , args.local_rank)
__snake_case: Union[str, Any] = 1
torch.distributed.init_process_group(backend="""nccl""") # Initializes the distributed backend
# Setup logging
logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.info("""device: {} n_gpu: {}, distributed: {}""".format(args.device , args.n_gpu , bool(args.local_rank != -1)))
    __snake_case: Any = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
# Distributed and parallel training
model.to(args.device)
if args.local_rank != -1:
__snake_case: Dict = nn.parallel.DistributedDataParallel(
SCREAMING_SNAKE_CASE__ , device_ids=[args.local_rank] , output_device=args.local_rank , find_unused_parameters=SCREAMING_SNAKE_CASE__)
elif args.n_gpu > 1:
__snake_case: str = nn.DataParallel(SCREAMING_SNAKE_CASE__)
# Print/save training arguments
os.makedirs(args.output_dir , exist_ok=SCREAMING_SNAKE_CASE__)
torch.save(SCREAMING_SNAKE_CASE__ , os.path.join(args.output_dir , """run_args.bin"""))
logger.info("""Training/evaluation parameters %s""" , SCREAMING_SNAKE_CASE__)
# Prepare dataset
__snake_case: str = np.concatenate(
[
            np.loadtxt(args.data_dir , dtype=np.int64),  # int64 so torch.from_numpy yields a LongTensor
])
__snake_case: Optional[int] = (torch.from_numpy(SCREAMING_SNAKE_CASE__),)
__snake_case: Optional[Any] = TensorDataset(*SCREAMING_SNAKE_CASE__)
__snake_case: int = RandomSampler(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[int] = DataLoader(SCREAMING_SNAKE_CASE__ , sampler=SCREAMING_SNAKE_CASE__ , batch_size=args.batch_size)
# Compute head entropy and importance score
compute_heads_importance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
    # Try head masking (set heads to zero until the score goes under a threshold)
# and head pruning (remove masked heads and see the effect on the network)
if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
__snake_case: Dict = mask_heads(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
prune_heads(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
main()
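# Example invocation (hypothetical script name and paths; the flags mirror the
# argument parser defined above):
#
#     python run_gpt2_head_pruning.py \
#         --data_dir ./data/wiki_tokens.txt \
#         --model_name_or_path gpt2 \
#         --output_dir ./pruning_output \
#         --try_masking --masking_threshold 0.9 --masking_amount 0.1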
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : int = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """rwkv"""
lowerCAmelCase__ = {"""max_position_embeddings""": """context_length"""}
def __init__( self : Dict , A : List[Any]=50_277 , A : List[Any]=1_024 , A : Union[str, Any]=4_096 , A : Tuple=32 , A : List[Any]=None , A : Tuple=None , A : Tuple=1E-5 , A : int=0 , A : Optional[int]=0 , A : Dict=6 , A : Dict=False , A : int=True , **A : List[Any] , ):
__snake_case: Tuple = vocab_size
__snake_case: Any = context_length
__snake_case: Dict = hidden_size
__snake_case: Dict = num_hidden_layers
__snake_case: Union[str, Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
__snake_case: str = intermediate_size if intermediate_size is not None else 4 * hidden_size
__snake_case: Any = layer_norm_epsilon
__snake_case: int = rescale_every
__snake_case: str = use_cache
__snake_case: Dict = bos_token_id
__snake_case: Union[str, Any] = eos_token_id
super().__init__(
tie_word_embeddings=A , bos_token_id=A , eos_token_id=A , **A )
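# Minimal usage sketch, assuming the upstream class name `RwkvConfig` for the
# config defined above: with only `hidden_size` given, `attention_hidden_size`
# and `intermediate_size` fall back to `hidden_size` and `4 * hidden_size`,
# per the defaults in `__init__`.
#
#     from transformers import RwkvConfig
#     config = RwkvConfig(vocab_size=50_277, context_length=1_024, hidden_size=768)
#     print(config.attention_hidden_size, config.intermediate_size)  # 768 3072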
"""simple docstring"""
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
__UpperCAmelCase : Tuple = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def A__ ( SCREAMING_SNAKE_CASE__) -> Any:
# 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in
# the original model
__snake_case: Optional[int] = list(s_dict.keys())
for key in keys:
__snake_case: List[Any] = r""".*/layers_(\d+)"""
__snake_case: Any = key
if re.match(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
__snake_case: Union[str, Any] = re.sub(r"""layers_(\d+)""" , r"""block/\1/layer""" , SCREAMING_SNAKE_CASE__)
__snake_case: Dict = r"""(encoder|decoder)\/"""
if re.match(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
__snake_case: Any = re.match(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__).groups()
if groups[0] == "encoder":
__snake_case: Tuple = re.sub(r"""/mlp/""" , r"""/1/mlp/""" , SCREAMING_SNAKE_CASE__)
__snake_case: List[str] = re.sub(r"""/pre_mlp_layer_norm/""" , r"""/1/layer_norm/""" , SCREAMING_SNAKE_CASE__)
elif groups[0] == "decoder":
__snake_case: Any = re.sub(r"""/mlp/""" , r"""/2/mlp/""" , SCREAMING_SNAKE_CASE__)
__snake_case: str = re.sub(r"""/pre_mlp_layer_norm/""" , r"""/2/layer_norm/""" , SCREAMING_SNAKE_CASE__)
# 2. Convert other classic mappings
for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
if old_key in new_key:
__snake_case: Dict = new_key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
print(F'''{key} -> {new_key}''')
__snake_case: Optional[int] = s_dict.pop(SCREAMING_SNAKE_CASE__)
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__snake_case: int = s_dict[
"""encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
__snake_case: Tuple = s_dict[
"""decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"""
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys()):
if "expert" in key:
__snake_case: Dict = s_dict[key].shape[0]
__snake_case: Any = s_dict[key]
for idx in range(SCREAMING_SNAKE_CASE__):
                __snake_case: Optional[Any] = expert_weights[idx]
print(F'''{key} -> {key.replace('expert/' , 'nested fstring')}''')
s_dict.pop(SCREAMING_SNAKE_CASE__)
return s_dict
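# Illustrative sketch of the first renaming step above: "layers_{n}" in a T5X
# parameter path becomes "block/{n}/layer" in the Hugging Face layout.
#
#     import re
#     print(re.sub(r"layers_(\d+)", r"block/\1/layer", "encoder/layers_3/mlp/wi/kernel"))
#     # -> encoder/block/3/layer/mlp/wi/kernel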
__UpperCAmelCase : Union[str, Any] = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Tuple:
    # Convert a Google-style gin config to the Hugging Face format
import regex as re
with open(SCREAMING_SNAKE_CASE__ , """r""") as f:
__snake_case: Optional[Any] = f.read()
__snake_case: Any = re.findall(r"""(.*) = ([0-9.]*)""" , SCREAMING_SNAKE_CASE__)
__snake_case: Union[str, Any] = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
__snake_case: List[str] = float(SCREAMING_SNAKE_CASE__) if """.""" in value else int(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = re.findall(r"""(.*activations) = \(\'(.*)\',\)""" , SCREAMING_SNAKE_CASE__)[0]
__snake_case: Tuple = str(activation[1])
__snake_case: Any = num_experts
__snake_case: Tuple = SwitchTransformersConfig(**SCREAMING_SNAKE_CASE__)
return config
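# Sketch of the gin parsing above on a hypothetical gin snippet (the stdlib
# `re` behaves the same as `regex` for these patterns):
#
#     import re
#     gin = "NUM_HEADS = 12\nEMBED_DIM = 768\ndense.MlpBlock.activations = ('gelu',)\n"
#     print(re.findall(r"(.*) = ([0-9.]*)", gin))
#     # [('NUM_HEADS', '12'), ('EMBED_DIM', '768'), ('dense.MlpBlock.activations', '')]
#     print(re.findall(r"(.*activations) = \(\'(.*)\',\)", gin)[0])
#     # ('dense.MlpBlock.activations', 'gelu')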
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="./" , SCREAMING_SNAKE_CASE__=8) -> int:
# Initialise PyTorch model
print(F'''Loading flax weights from : {flax_checkpoint_path}''')
    __snake_case: Tuple = checkpoints.load_t5x_checkpoint(SCREAMING_SNAKE_CASE__)
if gin_file is not None:
__snake_case: Optional[Any] = convert_gin_to_config(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
else:
__snake_case: Dict = SwitchTransformersConfig.from_pretrained(SCREAMING_SNAKE_CASE__)
__snake_case: Dict = SwitchTransformersForConditionalGeneration(SCREAMING_SNAKE_CASE__)
__snake_case: Any = flax_params["""target"""]
__snake_case: Optional[int] = flatten_dict(SCREAMING_SNAKE_CASE__ , sep="""/""")
__snake_case: List[Any] = rename_keys(SCREAMING_SNAKE_CASE__)
__snake_case: List[Any] = unflatten_dict(SCREAMING_SNAKE_CASE__ , sep="""/""")
# Load the flax params in the PT model
load_flax_weights_in_pytorch_model(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
print(F'''Save PyTorch model to {pytorch_dump_path}''')
pt_model.save_pretrained(SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
__UpperCAmelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
__UpperCAmelCase : Union[str, Any] = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBart50Tokenizer, MBart50TokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase : str = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__UpperCAmelCase : Any = 250_004
__UpperCAmelCase : List[str] = 250_020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
    lowerCAmelCase__ = MBart50Tokenizer
    lowerCAmelCase__ = MBart50TokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def UpperCAmelCase__ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
        __snake_case: Optional[int] = MBart50Tokenizer(A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Any = """<s>"""
__snake_case: Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(A ) , 1_054 )
def UpperCAmelCase__ ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_054 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
        __snake_case: Dict = MBart50Tokenizer(A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=A )
__snake_case: int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__snake_case: Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
__snake_case: List[Any] = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case: int = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
# fmt: off
__snake_case: List[str] = {"""input_ids""": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def UpperCAmelCase__ ( self : Union[str, Any] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__snake_case: Any = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case: Optional[int] = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: Union[str, Any] = self.tokenizer_class.from_pretrained(A , **A )
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Tuple = tokenizer_r.save_pretrained(A )
__snake_case: Optional[int] = tokenizer_p.save_pretrained(A )
                # Checks it saves the same files, plus the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__snake_case: Dict = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: Tuple = tokenizer_r.from_pretrained(A )
__snake_case: Optional[Any] = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
__snake_case: Tuple = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: List[str] = tokenizer_p.save_pretrained(A )
                # Checks it saves the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: List[Any] = tokenizer_r.from_pretrained(A )
__snake_case: Dict = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: Dict = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__snake_case: Any = tokenizer_r.from_pretrained(A )
__snake_case: Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = """facebook/mbart-large-50-one-to-many-mmt"""
lowerCAmelCase__ = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
lowerCAmelCase__ = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
lowerCAmelCase__ = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def UpperCAmelCase__ ( cls : int ):
        __snake_case: MBart50Tokenizer = MBart50Tokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
__snake_case: str = 1
return cls
def UpperCAmelCase__ ( self : Any ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250_020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 250_038 )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.assertIn(A , self.tokenizer.all_special_ids )
__snake_case: Dict = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
__snake_case: str = self.tokenizer.decode(A , skip_special_tokens=A )
__snake_case: Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
self.assertEqual(A , A )
self.assertNotIn(self.tokenizer.eos_token , A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[str] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , A )
__snake_case: Union[str, Any] = 10
__snake_case: List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0]
self.assertEqual(ids[0] , A )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(A ) , A )
def UpperCAmelCase__ ( self : Tuple ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250_053, 250_001] )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[Any] = tempfile.mkdtemp()
__snake_case: Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
        __snake_case: Union[str, Any] = MBart50Tokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A )
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" )
__snake_case: List[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: int = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__snake_case: Optional[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(A , A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__snake_case: List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase__ ( self : str ):
__snake_case: List[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" )
__snake_case: Union[str, Any] = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" )
__snake_case: Dict = targets["""input_ids"""]
__snake_case: Any = shift_tokens_right(A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(A ) , {
# en_XX, A, test, EOS
"""input_ids""": [[250_004, 62, 3_034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250_001,
} , )
def A__ ( SCREAMING_SNAKE_CASE__ = 50) -> int:
__snake_case: str = [[0] * 3 for _ in range(length + 1)]
for row_length in range(length + 1):
for tile_length in range(2 , 5):
for tile_start in range(row_length - tile_length + 1):
different_colour_ways_number[row_length][tile_length - 2] += (
different_colour_ways_number[row_length - tile_start - tile_length][
tile_length - 2
]
+ 1
)
return sum(different_colour_ways_number[length])
if __name__ == "__main__":
print(f'{solution() = }')
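# Sanity check against the worked example of Project Euler 116, which this
# solution targets: a row of length 5 admits 7 tilings with red (length-2)
# tiles, 3 with green (length-3) and 2 with blue (length-4) tiles, so the
# function above returns 7 + 3 + 2 = 12 for length = 5.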
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__UpperCAmelCase : str = logging.get_logger(__name__)
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : Any , A : int , A : int , A : float , **A : Optional[int] ):
__snake_case: List[str] = feature_size
__snake_case: Optional[int] = sampling_rate
__snake_case: Any = padding_value
__snake_case: Dict = kwargs.pop("""padding_side""" , """right""" )
__snake_case: Union[str, Any] = kwargs.pop("""return_attention_mask""" , A )
super().__init__(**A )
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , A : Union[bool, str, PaddingStrategy] = True , A : Optional[int] = None , A : bool = False , A : Optional[int] = None , A : Optional[bool] = None , A : Optional[Union[str, TensorType]] = None , ):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(A , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__snake_case: Optional[int] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f''' to this method that includes {self.model_input_names[0]}, but you provided'''
f''' {list(processed_features.keys() )}''' )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
__snake_case: Any = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(A ) == 0:
if return_attention_mask:
__snake_case: Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__snake_case: int = required_input[0]
if isinstance(A , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__snake_case: Optional[int] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(A ):
__snake_case: Optional[int] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(A ):
__snake_case: str = """tf"""
elif is_torch_tensor(A ):
__snake_case: str = """pt"""
elif isinstance(A , (int, float, list, tuple, np.ndarray) ):
__snake_case: List[str] = """np"""
else:
raise ValueError(
f'''type of {first_element} unknown: {type(A )}. '''
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__snake_case: List[Any] = to_numpy(A )
else:
__snake_case: Union[str, Any] = [to_numpy(A ) for v in value]
# Convert padding_strategy in PaddingStrategy
__snake_case: Union[str, Any] = self._get_padding_strategies(padding=A , max_length=A )
__snake_case: Any = processed_features[self.model_input_names[0]]
__snake_case: int = len(A )
if not all(len(A ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__snake_case: Union[str, Any] = []
for i in range(A ):
__snake_case: List[Any] = {k: v[i] for k, v in processed_features.items()}
# truncation
__snake_case: Tuple = self._truncate(
A , max_length=A , pad_to_multiple_of=A , truncation=A , )
truncated_inputs.append(A )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__snake_case: Optional[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__snake_case: List[str] = PaddingStrategy.MAX_LENGTH
__snake_case: List[Any] = {}
for i in range(A ):
# padding
__snake_case: Any = self._pad(
truncated_inputs[i] , max_length=A , padding_strategy=A , pad_to_multiple_of=A , return_attention_mask=A , )
for key, value in outputs.items():
if key not in batch_outputs:
__snake_case: Optional[Any] = []
                if value.dtype is np.dtype(np.float64 ):
                    __snake_case: str = value.astype(np.float32 )
batch_outputs[key].append(A )
return BatchFeature(A , tensor_type=A )
def UpperCAmelCase__ ( self : int , A : Union[Dict[str, np.ndarray], BatchFeature] , A : Optional[int] = None , A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , A : Optional[int] = None , A : Optional[bool] = None , ):
__snake_case: List[Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__snake_case: List[str] = len(A )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
            __snake_case: List[str] = np.ones(len(A ) , dtype=np.int32 )
if needs_to_be_padded:
__snake_case: Any = max_length - len(A )
if self.padding_side == "right":
if return_attention_mask:
__snake_case: Optional[int] = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__snake_case: Any = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__snake_case: Union[str, Any] = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__snake_case: Dict = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__snake_case: Union[str, Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__snake_case: str = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[Dict[str, np.ndarray], BatchFeature] , A : Optional[int] = None , A : Optional[int] = None , A : Optional[bool] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Tuple = len(A ) > max_length
if needs_to_be_truncated:
__snake_case: List[Any] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__snake_case: int = processed_features["""attention_mask"""][:max_length]
return processed_features
def UpperCAmelCase__ ( self : int , A : int=False , A : int=None ):
# Get padding strategy
if padding is not False:
if padding is True:
__snake_case: Optional[int] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(A , A ):
__snake_case: Optional[int] = PaddingStrategy(A )
elif isinstance(A , A ):
__snake_case: Any = padding
else:
__snake_case: Any = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
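# Minimal sketch of the right-side padding performed in `_pad` above
# (illustrative names; assumes feature_size == 1 and padding_value == 0.0):
#
#     import numpy as np
#     input_values = np.array([0.1, 0.2, 0.3])
#     difference = 2  # max_length - len(input_values)
#     np.pad(input_values, (0, difference), "constant", constant_values=0.0)
#     # -> array([0.1, 0.2, 0.3, 0. , 0. ])
#     np.pad(np.ones(3, dtype=np.int32), (0, difference))
#     # -> array([1, 1, 1, 0, 0], dtype=int32)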
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__UpperCAmelCase : str = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
__UpperCAmelCase : Any = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
__UpperCAmelCase : List[Any] = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase__ ( self : List[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def UpperCAmelCase__ ( self : List[str] , A : int , A : str , A : Any=4 , A : Tuple=False ):
__snake_case: Tuple = compute_bleu(
reference_corpus=A , translation_corpus=A , max_order=A , smooth=A )
(__snake_case): Optional[Any] = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
from __future__ import annotations
import numpy as np
def A__ ( SCREAMING_SNAKE_CASE__) -> List[str]:
return np.maximum(0 , SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
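    # relu is applied elementwise, so the same np.maximum(0, x) broadcasts over
    # any array shape (illustrative extra check):
    print(np.maximum(0, np.array([[-2.0, 3.0], [0.5, -0.1]])))  # --> [[0.  3. ] [0.5 0. ]]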
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@parameterized.expand([(None,), ("""foo.json""",)] )
def UpperCAmelCase__ ( self : List[str] , A : Optional[Any] ):
__snake_case: Any = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
__snake_case: Optional[int] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: str = AutoConfig.from_pretrained("""gpt2""" )
__snake_case: Any = GenerationConfig.from_model_config(A )
__snake_case: str = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self : str ):
__snake_case: List[str] = GenerationConfig()
__snake_case: Tuple = {
"""max_new_tokens""": 1_024,
"""foo""": """bar""",
}
__snake_case: List[str] = copy.deepcopy(A )
__snake_case: Optional[int] = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {"""foo""": """bar"""} )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: List[str] = GenerationConfig()
__snake_case: Optional[int] = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(A )
__snake_case: Any = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
__snake_case: int = GenerationConfig.from_model_config(A )
assert not hasattr(A , """foo""" ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Dict = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
__snake_case: Union[str, Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
__snake_case: Tuple = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCAmelCase__ ( cls : List[str] ):
__snake_case: Optional[int] = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Optional[int] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__snake_case: str = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""test-generation-config""" , push_to_hub=A , use_auth_token=self._token )
__snake_case: Optional[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__snake_case: int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=A , use_auth_token=self._token )
__snake_case: Optional[int] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
| 355
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@parameterized.expand([(None,), ("""foo.json""",)] )
def UpperCAmelCase__ ( self : List[str] , A : Optional[Any] ):
__snake_case: Any = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
__snake_case: Optional[int] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: str = AutoConfig.from_pretrained("""gpt2""" )
__snake_case: Any = GenerationConfig.from_model_config(A )
__snake_case: str = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self : str ):
__snake_case: List[str] = GenerationConfig()
__snake_case: Tuple = {
"""max_new_tokens""": 1_024,
"""foo""": """bar""",
}
__snake_case: List[str] = copy.deepcopy(A )
__snake_case: Optional[int] = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {"""foo""": """bar"""} )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: List[str] = GenerationConfig()
__snake_case: Optional[int] = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(A )
__snake_case: Any = GenerationConfig.from_pretrained(A )
# custom attributes added before saving survive the save/load round-trip
self.assertEqual(new_config.foo , """bar""" )
__snake_case: int = GenerationConfig.from_model_config(A )
assert not hasattr(A , """foo""" ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Dict = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
__snake_case: Union[str, Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
__snake_case: Tuple = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCAmelCase__ ( cls : List[str] ):
__snake_case: Optional[int] = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Optional[int] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__snake_case: str = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""test-generation-config""" , push_to_hub=A , use_auth_token=self._token )
__snake_case: Optional[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__snake_case: int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=A , use_auth_token=self._token )
__snake_case: Optional[int] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
| 293
| 0
|
from __future__ import annotations
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> tuple[float, list[float]]:
__snake_case: Union[str, Any] = list(range(len(SCREAMING_SNAKE_CASE__)))
__snake_case: List[Any] = [v / w for v, w in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)]
index.sort(key=lambda i: ratio[i] , reverse=True)
__snake_case: float = 0
__snake_case: list[float] = [0] * len(SCREAMING_SNAKE_CASE__)
for i in index:
if weight[i] <= capacity:
__snake_case: Any = 1
max_value += value[i]
capacity -= weight[i]
else:
__snake_case: List[str] = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
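# Illustrative trace (values below are hypothetical, not from this file):
# with value = [60, 100, 120], weight = [10, 20, 30] and capacity = 50, the
# ratios are 6.0, 5.0 and 4.0, so the first two items are taken whole and
# 2/3 of the third, yielding max_value = 240.0 and fractions = [1, 1, 0.666...].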
if __name__ == "__main__":
import doctest
doctest.testmod()
| 356
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__UpperCAmelCase : Tuple = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def A__ ( SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts)
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config)
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights)
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
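# A hypothetical CLM invocation that satisfies the checks above (a sketch,
# not a command taken from this repository's docs; paths are placeholders):
# python train.py --student_type gpt2 --teacher_type gpt2 \
#     --alpha_clm 0.5 --alpha_mlm 0.0 --student_config cfg.json \
#     --dump_path ./dump --data_file data.pickle --teacher_name gpt2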
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> str:
if args.student_type == "roberta":
__snake_case: Optional[Any] = False
elif args.student_type == "gpt2":
__snake_case: str = False
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[str]:
if args.student_type == "roberta":
__snake_case: Optional[int] = False
def A__ ( ) -> Tuple:
__snake_case: Optional[int] = argparse.ArgumentParser(description="""Training""")
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""")
parser.add_argument(
"""--dump_path""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The output directory (log, checkpoints, parameters, etc.)""")
parser.add_argument(
"""--data_file""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=SCREAMING_SNAKE_CASE__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""Path to the student configuration.""")
parser.add_argument(
"""--student_pretrained_weights""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Load student initialization checkpoint.""")
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""Teacher type (BERT, RoBERTa).""")
parser.add_argument("""--teacher_name""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The teacher model.""")
parser.add_argument("""--temperature""" , default=2.0 , type=SCREAMING_SNAKE_CASE__ , help="""Temperature for the softmax temperature.""")
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the distillation loss. Must be >=0.""")
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the CLM loss. Must be >=0.""")
parser.add_argument("""--alpha_mse""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the MSE loss. Must be >=0.""")
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""")
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""")
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to mask out.""")
parser.add_argument("""--word_keep""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to keep.""")
parser.add_argument("""--word_rand""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to randomly replace.""")
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=SCREAMING_SNAKE_CASE__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=SCREAMING_SNAKE_CASE__ , help="""The token counts in the data_file for MLM.""")
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=SCREAMING_SNAKE_CASE__ , default=3 , help="""Number of pass on the whole dataset.""")
parser.add_argument("""--batch_size""" , type=SCREAMING_SNAKE_CASE__ , default=5 , help="""Batch size (for each process).""")
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=SCREAMING_SNAKE_CASE__ , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=SCREAMING_SNAKE_CASE__ , help="""Linear warmup proportion.""")
parser.add_argument("""--weight_decay""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Weight decay if we apply some.""")
parser.add_argument("""--learning_rate""" , default=5e-4 , type=SCREAMING_SNAKE_CASE__ , help="""The initial learning rate for Adam.""")
parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=SCREAMING_SNAKE_CASE__ , help="""Epsilon for Adam optimizer.""")
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=SCREAMING_SNAKE_CASE__ , help="""Max gradient norm.""")
parser.add_argument("""--initializer_range""" , default=0.02 , type=SCREAMING_SNAKE_CASE__ , help="""Random initialization range.""")
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=SCREAMING_SNAKE_CASE__ , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="""Number of GPUs in the node.""")
parser.add_argument("""--local_rank""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="""Distributed training - Local rank""")
parser.add_argument("""--seed""" , type=SCREAMING_SNAKE_CASE__ , default=56 , help="""Random seed""")
parser.add_argument("""--log_interval""" , type=SCREAMING_SNAKE_CASE__ , default=500 , help="""Tensorboard logging interval.""")
parser.add_argument("""--checkpoint_interval""" , type=SCREAMING_SNAKE_CASE__ , default=4000 , help="""Checkpoint interval.""")
__snake_case: List[Any] = parser.parse_args()
sanity_checks(SCREAMING_SNAKE_CASE__)
# ARGS #
init_gpu_params(SCREAMING_SNAKE_CASE__)
set_seed(SCREAMING_SNAKE_CASE__)
if args.is_master:
if os.path.exists(args.dump_path):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
""" it. Use `--force` if you want to overwrite it.""")
else:
shutil.rmtree(args.dump_path)
if not os.path.exists(args.dump_path):
os.makedirs(args.dump_path)
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''')
# SAVE PARAMS #
logger.info(F'''Param: {args}''')
with open(os.path.join(args.dump_path , """parameters.json""") , """w""") as f:
json.dump(vars(SCREAMING_SNAKE_CASE__) , SCREAMING_SNAKE_CASE__ , indent=4)
git_log(args.dump_path)
__snake_case , __snake_case , __snake_case: str = MODEL_CLASSES[args.student_type]
__snake_case , __snake_case , __snake_case: Union[str, Any] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__snake_case: Tuple = teacher_tokenizer_class.from_pretrained(args.teacher_name)
__snake_case: str = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__snake_case: List[str] = tokenizer.all_special_tokens.index(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''')
__snake_case: Optional[Any] = special_tok_ids
__snake_case: List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file , """rb""") as fp:
__snake_case: int = pickle.load(SCREAMING_SNAKE_CASE__)
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''')
with open(args.token_counts , """rb""") as fp:
__snake_case: List[str] = pickle.load(SCREAMING_SNAKE_CASE__)
__snake_case: Dict = np.maximum(SCREAMING_SNAKE_CASE__ , 1) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__snake_case: Union[str, Any] = 0.0 # do not predict special tokens
__snake_case: Any = torch.from_numpy(SCREAMING_SNAKE_CASE__)
else:
__snake_case: Any = None
__snake_case: Union[str, Any] = LmSeqsDataset(params=SCREAMING_SNAKE_CASE__ , data=SCREAMING_SNAKE_CASE__)
logger.info("""Data loader created.""")
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''')
__snake_case: Tuple = student_config_class.from_pretrained(args.student_config)
__snake_case: List[str] = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''')
__snake_case: Optional[int] = student_model_class.from_pretrained(args.student_pretrained_weights , config=SCREAMING_SNAKE_CASE__)
else:
__snake_case: Union[str, Any] = student_model_class(SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''')
logger.info("""Student loaded.""")
# TEACHER #
__snake_case: Optional[int] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''')
logger.info(F'''Teacher loaded from {args.teacher_name}.''')
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if args.freeze_token_type_embds:
freeze_token_type_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__snake_case: List[str] = Distiller(
params=SCREAMING_SNAKE_CASE__ , dataset=SCREAMING_SNAKE_CASE__ , token_probs=SCREAMING_SNAKE_CASE__ , student=SCREAMING_SNAKE_CASE__ , teacher=SCREAMING_SNAKE_CASE__)
distiller.train()
logger.info("""Let's go get some drinks.""")
if __name__ == "__main__":
main()
| 293
| 0
|
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def A__ ( SCREAMING_SNAKE_CASE__) -> str:
return getitem, k
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Any:
return setitem, k, v
def A__ ( SCREAMING_SNAKE_CASE__) -> str:
return delitem, k
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__) -> List[Any]:
try:
return fun(SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__), None
except Exception as e:
return None, e
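# Each helper above returns an "operation tuple" that _run_operation unpacks,
# e.g. _set("key_a", "val_a") == (setitem, "key_a", "val_a") and
# _get("key_a") == (getitem, "key_a"), so the same operation can be replayed
# against both HashMap and the built-in dict.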
__UpperCAmelCase : Tuple = (
_set("key_a", "val_a"),
_set("key_b", "val_b"),
)
__UpperCAmelCase : int = [
_set("key_a", "val_a"),
_set("key_a", "val_b"),
]
__UpperCAmelCase : Any = [
_set("key_a", "val_a"),
_set("key_b", "val_b"),
_del("key_a"),
_del("key_b"),
_set("key_a", "val_a"),
_del("key_a"),
]
__UpperCAmelCase : Tuple = [
_get("key_a"),
_del("key_a"),
_set("key_a", "val_a"),
_del("key_a"),
_del("key_a"),
_get("key_a"),
]
__UpperCAmelCase : Dict = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
__UpperCAmelCase : Union[str, Any] = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("key_a", "val_b"),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items"""),
pytest.param(_overwrite_items , id="""overwrite items"""),
pytest.param(_delete_items , id="""delete items"""),
pytest.param(_access_absent_items , id="""access absent items"""),
pytest.param(_add_with_resize_up , id="""add with resize up"""),
pytest.param(_add_with_resize_down , id="""add with resize down"""),
) , )
def A__ ( SCREAMING_SNAKE_CASE__) -> Dict:
__snake_case: Optional[Any] = HashMap(initial_block_size=4)
__snake_case: Union[str, Any] = {}
for _, (fun, *args) in enumerate(SCREAMING_SNAKE_CASE__):
__snake_case: Optional[Any] = _run_operation(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__)
__snake_case: Optional[int] = _run_operation(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , *SCREAMING_SNAKE_CASE__)
assert my_res == py_res
assert str(SCREAMING_SNAKE_CASE__) == str(SCREAMING_SNAKE_CASE__)
assert set(SCREAMING_SNAKE_CASE__) == set(SCREAMING_SNAKE_CASE__)
assert len(SCREAMING_SNAKE_CASE__) == len(SCREAMING_SNAKE_CASE__)
assert set(my.items()) == set(py.items())
def A__ ( ) -> int:
def is_public(name) -> bool:
return not name.startswith("""_""")
__snake_case: Tuple = {name for name in dir({}) if is_public(name)}
__snake_case: List[Any] = {name for name in dir(HashMap()) if is_public(name)}
assert dict_public_names > hash_public_names
| 357
|
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 293
| 0
|
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__UpperCAmelCase : List[Any] = {
"iou_prediction_head.layers.0": "iou_prediction_head.proj_in",
"iou_prediction_head.layers.1": "iou_prediction_head.layers.0",
"iou_prediction_head.layers.2": "iou_prediction_head.proj_out",
"mask_decoder.output_upscaling.0": "mask_decoder.upscale_conv1",
"mask_decoder.output_upscaling.1": "mask_decoder.upscale_layer_norm",
"mask_decoder.output_upscaling.3": "mask_decoder.upscale_conv2",
"mask_downscaling.0": "mask_embed.conv1",
"mask_downscaling.1": "mask_embed.layer_norm1",
"mask_downscaling.3": "mask_embed.conv2",
"mask_downscaling.4": "mask_embed.layer_norm2",
"mask_downscaling.6": "mask_embed.conv3",
"point_embeddings": "point_embed",
"pe_layer.positional_encoding_gaussian_matrix": "shared_embedding.positional_embedding",
"image_encoder": "vision_encoder",
"neck.0": "neck.conv1",
"neck.1": "neck.layer_norm1",
"neck.2": "neck.conv2",
"neck.3": "neck.layer_norm2",
"patch_embed.proj": "patch_embed.projection",
".norm": ".layer_norm",
"blocks": "layers",
}
def A__ ( SCREAMING_SNAKE_CASE__) -> int:
__snake_case: int = {}
state_dict.pop("""pixel_mean""" , SCREAMING_SNAKE_CASE__)
state_dict.pop("""pixel_std""" , SCREAMING_SNAKE_CASE__)
__snake_case: Union[str, Any] = r""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
__snake_case: List[str] = key.replace(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if re.match(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
__snake_case: Optional[int] = int(re.match(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__).group(2))
if layer_nb == 0:
__snake_case: Tuple = key.replace("""layers.0""" , """proj_in""")
elif layer_nb == 1:
__snake_case: Union[str, Any] = key.replace("""layers.1""" , """layers.0""")
elif layer_nb == 2:
__snake_case: Optional[Any] = key.replace("""layers.2""" , """proj_out""")
__snake_case: Union[str, Any] = value
__snake_case: Tuple = model_state_dict[
"""prompt_encoder.shared_embedding.positional_embedding"""
]
return model_state_dict
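# Example rename under KEYS_TO_MODIFY_MAPPING above (illustrative key, not
# read from a real checkpoint): "image_encoder.blocks.0.norm1.weight" becomes
# "vision_encoder.layers.0.layer_norm1.weight".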
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="ybelkada/segment-anything") -> Optional[int]:
__snake_case: List[Any] = hf_hub_download(SCREAMING_SNAKE_CASE__ , F'''checkpoints/{model_name}.pth''')
if "sam_vit_b" in model_name:
__snake_case: List[str] = SamConfig()
elif "sam_vit_l" in model_name:
__snake_case: Dict = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
__snake_case: Union[str, Any] = SamConfig(
vision_config=SCREAMING_SNAKE_CASE__ , )
elif "sam_vit_h" in model_name:
__snake_case: Tuple = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
__snake_case: int = SamConfig(
vision_config=SCREAMING_SNAKE_CASE__ , )
__snake_case: List[str] = torch.load(SCREAMING_SNAKE_CASE__ , map_location="""cpu""")
__snake_case: int = replace_keys(SCREAMING_SNAKE_CASE__)
__snake_case: List[str] = SamImageProcessor()
__snake_case: Optional[Any] = SamProcessor(image_processor=SCREAMING_SNAKE_CASE__)
__snake_case: str = SamModel(SCREAMING_SNAKE_CASE__)
hf_model.load_state_dict(SCREAMING_SNAKE_CASE__)
__snake_case: List[Any] = hf_model.to("""cuda""")
__snake_case: Dict = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
__snake_case: Dict = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__).raw).convert("""RGB""")
__snake_case: Union[str, Any] = [[[400, 650]]]
__snake_case: int = [[1]]
__snake_case: List[str] = processor(images=np.array(SCREAMING_SNAKE_CASE__) , return_tensors="""pt""").to("""cuda""")
with torch.no_grad():
__snake_case: Union[str, Any] = hf_model(**SCREAMING_SNAKE_CASE__)
__snake_case: List[str] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.579_890_251_159_668
__snake_case: List[str] = processor(
images=np.array(SCREAMING_SNAKE_CASE__) , input_points=SCREAMING_SNAKE_CASE__ , input_labels=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""").to("""cuda""")
with torch.no_grad():
__snake_case: Optional[int] = hf_model(**SCREAMING_SNAKE_CASE__)
__snake_case: Tuple = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_712_603_092_193_604
__snake_case: Optional[int] = ((75, 275, 1725, 850),)
__snake_case: Union[str, Any] = processor(images=np.array(SCREAMING_SNAKE_CASE__) , input_boxes=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""").to("""cuda""")
with torch.no_grad():
__snake_case: List[str] = hf_model(**SCREAMING_SNAKE_CASE__)
__snake_case: Any = output.iou_scores.squeeze()
assert scores[-1].item() == 0.8_686_015_605_926_514
# Test with 2 points and 1 image.
__snake_case: Dict = [[[400, 650], [800, 650]]]
__snake_case: Any = [[1, 1]]
__snake_case: List[Any] = processor(
images=np.array(SCREAMING_SNAKE_CASE__) , input_points=SCREAMING_SNAKE_CASE__ , input_labels=SCREAMING_SNAKE_CASE__ , return_tensors="""pt""").to("""cuda""")
with torch.no_grad():
__snake_case: List[str] = hf_model(**SCREAMING_SNAKE_CASE__)
__snake_case: str = output.iou_scores.squeeze()
assert scores[-1].item() == 0.9_936_047_792_434_692
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
__UpperCAmelCase : Optional[Any] = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"--model_name",
default="sam_vit_h_4b8939",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
parser.add_argument(
"--model_hub_id",
default="ybelkada/segment-anything",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
__UpperCAmelCase : Any = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 358
|
import argparse
from collections import defaultdict
import yaml
__UpperCAmelCase : int = "docs/source/en/_toctree.yml"
def A__ ( SCREAMING_SNAKE_CASE__) -> Dict:
__snake_case: Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE__)
for doc in model_doc:
counts[doc["local"]] += 1
__snake_case: Dict = [key for key, value in counts.items() if value > 1]
__snake_case: Optional[Any] = []
for duplicate_key in duplicates:
__snake_case: Tuple = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key})
if len(SCREAMING_SNAKE_CASE__) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of contents at '''
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""")
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]})
# Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1])
# Sort
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda s: s["title"].lower())
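# For example, two entries that share local == "bert" and the same title are
# collapsed into a single {"local": "bert", "title": ...} entry, and the
# cleaned list comes back sorted alphabetically by title.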
def A__ ( SCREAMING_SNAKE_CASE__=False) -> List[str]:
with open(SCREAMING_SNAKE_CASE__ , encoding="""utf-8""") as f:
__snake_case: Optional[int] = yaml.safe_load(f.read())
# Get to the API doc
__snake_case: Dict = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__snake_case: str = content[api_idx]["""sections"""]
# Then to the model doc
__snake_case: List[Any] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
__snake_case: Dict = api_doc[model_idx]["""sections"""]
__snake_case: int = [(idx, section) for idx, section in enumerate(SCREAMING_SNAKE_CASE__) if """sections""" in section]
__snake_case: Optional[int] = False
for idx, modality_doc in modalities_docs:
__snake_case: Dict = modality_doc["""sections"""]
__snake_case: List[str] = clean_model_doc_toc(SCREAMING_SNAKE_CASE__)
if old_modality_doc != new_modality_doc:
__snake_case: List[str] = True
if overwrite:
__snake_case: Dict = new_modality_doc
if diff:
if overwrite:
__snake_case: Dict = model_doc
__snake_case: int = api_doc
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""") as f:
f.write(yaml.dump(SCREAMING_SNAKE_CASE__ , allow_unicode=SCREAMING_SNAKE_CASE__))
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""")
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__UpperCAmelCase : str = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 293
| 0
|
from __future__ import annotations
from collections import namedtuple
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> tuple:
__snake_case: List[str] = namedtuple("""result""" , """name value""")
if (voltage, current, power).count(0) != 1:
raise ValueError("""Only one argument must be 0""")
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""")
elif voltage == 0:
return result("""voltage""" , power / current)
elif current == 0:
return result("""current""" , power / voltage)
elif power == 0:
return result("""power""" , float(round(abs(voltage * current) , 2)))
else:
raise ValueError("""Exactly one argument must be 0""")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def A__ ( SCREAMING_SNAKE_CASE__) -> list[list[float]]:
__snake_case: Any = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(SCREAMING_SNAKE_CASE__) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
# Calculate the determinant of the matrix
__snake_case: Tuple = float(
d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
if determinant == 0:
raise ValueError("""This matrix has no inverse.""")
# Creates a copy of the matrix with swapped positions of the elements
__snake_case: Optional[int] = [[0.0, 0.0], [0.0, 0.0]]
__snake_case , __snake_case: Optional[Any] = matrix[1][1], matrix[0][0]
__snake_case , __snake_case: Union[str, Any] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(SCREAMING_SNAKE_CASE__)) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(SCREAMING_SNAKE_CASE__) == 3
and len(matrix[0]) == 3
and len(matrix[1]) == 3
and len(matrix[2]) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
__snake_case: Any = float(
(
(d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
+ (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
+ (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
)
- (
(d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
+ (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
+ (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
))
if determinant == 0:
raise ValueError("""This matrix has no inverse.""")
# Creating cofactor matrix
__snake_case: Tuple = [
[d(0.0), d(0.0), d(0.0)],
[d(0.0), d(0.0), d(0.0)],
[d(0.0), d(0.0), d(0.0)],
]
__snake_case: Dict = (d(matrix[1][1]) * d(matrix[2][2])) - (
d(matrix[1][2]) * d(matrix[2][1])
)
__snake_case: Tuple = -(
(d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
)
__snake_case: Optional[int] = (d(matrix[1][0]) * d(matrix[2][1])) - (
d(matrix[1][1]) * d(matrix[2][0])
)
__snake_case: Union[str, Any] = -(
(d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
)
__snake_case: str = (d(matrix[0][0]) * d(matrix[2][2])) - (
d(matrix[0][2]) * d(matrix[2][0])
)
__snake_case: List[Any] = -(
(d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
)
__snake_case: Optional[Any] = (d(matrix[0][1]) * d(matrix[1][2])) - (
d(matrix[0][2]) * d(matrix[1][1])
)
__snake_case: List[str] = -(
(d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
)
__snake_case: Optional[int] = (d(matrix[0][0]) * d(matrix[1][1])) - (
d(matrix[0][1]) * d(matrix[1][0])
)
# Transpose the cofactor matrix (Adjoint matrix)
__snake_case: List[Any] = array(SCREAMING_SNAKE_CASE__)
for i in range(3):
for j in range(3):
__snake_case: Tuple = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
__snake_case: List[Any] = array(SCREAMING_SNAKE_CASE__)
for i in range(3):
for j in range(3):
inverse_matrix[i][j] /= d(SCREAMING_SNAKE_CASE__)
# Calculate the inverse of the matrix
return [[float(d(SCREAMING_SNAKE_CASE__)) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""")
| 293
| 0
|
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
__UpperCAmelCase : Any = logging.getLogger(__name__)
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : Dict , A : Any , A : Tuple , A : List[str] , A : Dict=None ):
super().__init__(
A , question_encoder_tokenizer=A , generator_tokenizer=A , index=A , init_retrieval=A , )
__snake_case: List[str] = None
def UpperCAmelCase__ ( self : List[str] , A : int ):
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
__snake_case: Union[str, Any] = self._infer_socket_ifname()
# avoid clash with the NCCL port
__snake_case: Optional[int] = str(distributed_port + 1 )
__snake_case: Tuple = dist.new_group(ranks=A , backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait until the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCAmelCase__ ( self : Optional[Any] ):
return dist.get_rank(group=self.process_group ) == 0
def UpperCAmelCase__ ( self : Tuple , A : Union[str, Any] , A : Tuple , A : int=torch.floataa ):
__snake_case: Tuple = torch.empty(A , dtype=A )
dist.scatter(A , src=0 , scatter_list=A , group=self.process_group )
return target_tensor
def UpperCAmelCase__ ( self : Any ):
__snake_case: Union[str, Any] = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__snake_case: Optional[Any] = next((addr for addr in addrs if addr.startswith("""e""" )) , A )
return ifname
def UpperCAmelCase__ ( self : Dict , A : np.ndarray , A : int ):
# single GPU training
if not dist.is_initialized():
__snake_case: str = self._main_retrieve(A , A )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(A )
# distributed training
__snake_case: Union[str, Any] = dist.get_world_size(group=self.process_group )
# gather logic
__snake_case: Optional[Any] = None
if self._is_main():
__snake_case: Dict = [torch.empty(question_hidden_states.shape , dtype=torch.floataa ) for _ in range(A )]
dist.gather(torch.tensor(A ) , dst=0 , gather_list=A , group=self.process_group )
# scatter logic
__snake_case: List[Any] = question_hidden_states.shape[0]
__snake_case: Dict = []
__snake_case: List[str] = []
if self._is_main():
assert len(A ) == world_size
__snake_case: Dict = self._main_retrieve(torch.cat(A ).numpy() , A )
__snake_case: Optional[Any] = torch.tensor(A ), torch.tensor(A )
__snake_case: List[str] = self._chunk_tensor(A , A )
__snake_case: str = self._chunk_tensor(A , A )
__snake_case: Optional[int] = self._scattered(A , [n_queries, n_docs] , target_type=torch.intaa )
__snake_case: Optional[Any] = self._scattered(A , [n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(A )
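# Net effect of the gather/scatter logic above: every worker sends its
# question_hidden_states to rank 0, rank 0 runs the index lookup once over
# the concatenated batch, and the per-worker slices of doc ids and doc
# embeddings are scattered back to their originating workers.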
| 360
|
import math
def A__ ( SCREAMING_SNAKE_CASE__) -> int:
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
__snake_case: Optional[int] = F'''Input value of [number={number}] must be an integer'''
raise TypeError(SCREAMING_SNAKE_CASE__)
if number < 1:
__snake_case: Optional[int] = F'''Input value of [number={number}] must be > 0'''
raise ValueError(SCREAMING_SNAKE_CASE__)
elif number == 1:
return 3
elif number == 2:
return 5
else:
__snake_case: List[Any] = int(math.log(number // 3 , 2)) + 2
__snake_case: str = [3, 5]
__snake_case: int = 2
__snake_case: List[str] = 3
for block in range(1 , SCREAMING_SNAKE_CASE__):
for _ in range(SCREAMING_SNAKE_CASE__):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
proth_index += 1
increment *= 2
return proth_list[number - 1]
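# The sequence produced is the Proth numbers 3, 5, 9, 13, 17, 25, 33, ...
# (numbers of the form k * 2**n + 1 with odd k < 2**n), so e.g. the 3rd
# value is 9 and the 6th is 25.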
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
__UpperCAmelCase : Optional[int] = 0
try:
__UpperCAmelCase : int = proth(number)
except ValueError:
print(f'ValueError: there is no {number}th Proth number')
continue
print(f'The {number}th Proth number: {value}')
| 293
| 0
|
from __future__ import annotations
from statistics import mean
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> list[int]:
__snake_case: int = [0] * no_of_processes
__snake_case: Any = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(SCREAMING_SNAKE_CASE__):
__snake_case: Any = burst_time[i]
__snake_case: list[int] = []
__snake_case: Union[str, Any] = 0
__snake_case: Any = 0
# While some process is still uncompleted:
# every process whose arrival time has passed and which still has
# remaining execution time is put into ready_process, and the process
# with the shortest remaining time (target_process) is then run to completion.
while completed != no_of_processes:
__snake_case: Dict = []
__snake_case: Optional[int] = -1
for i in range(SCREAMING_SNAKE_CASE__):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(SCREAMING_SNAKE_CASE__)
if len(SCREAMING_SNAKE_CASE__) > 0:
__snake_case: Any = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
__snake_case: int = i
total_time += burst_time[target_process]
completed += 1
__snake_case: List[Any] = 0
__snake_case: str = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
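# Worked trace (using test case 01 below): with burst times [2, 5, 3, 7]
# all arriving at t=0, jobs finish in the order P1, P3, P2, P4, giving
# waiting times [0, 5, 2, 10] and an average wait of 4.25.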
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> list[int]:
__snake_case: str = [0] * no_of_processes
for i in range(SCREAMING_SNAKE_CASE__):
__snake_case: List[Any] = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("[TEST CASE 01]")
__UpperCAmelCase : str = 4
__UpperCAmelCase : List[str] = [2, 5, 3, 7]
__UpperCAmelCase : Optional[Any] = [0, 0, 0, 0]
__UpperCAmelCase : Any = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__UpperCAmelCase : Optional[Any] = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time")
for i, process_id in enumerate(list(range(1, 5))):
print(
f'{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t'
f'{waiting_time[i]}\t\t\t\t{turn_around_time[i]}'
)
print(f'\nAverage waiting time = {mean(waiting_time):.5f}')
print(f'Average turnaround time = {mean(turn_around_time):.5f}')
| 361
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = 42
class __snake_case ( __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = True
@register_to_config
def __init__( self : Union[str, Any] , A : int = 3 , A : int = 3 , A : Tuple[str] = ("DownEncoderBlock2D",) , A : Tuple[str] = ("UpDecoderBlock2D",) , A : Tuple[int] = (64,) , A : int = 1 , A : str = "silu" , A : int = 4 , A : int = 32 , A : int = 32 , A : float = 0.1_8215 , ):
super().__init__()
# pass init params to Encoder
__snake_case: Any = Encoder(
in_channels=A , out_channels=A , down_block_types=A , block_out_channels=A , layers_per_block=A , act_fn=A , norm_num_groups=A , double_z=A , )
# pass init params to Decoder
__snake_case: int = Decoder(
in_channels=A , out_channels=A , up_block_types=A , block_out_channels=A , layers_per_block=A , norm_num_groups=A , act_fn=A , )
__snake_case: Dict = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
__snake_case: int = nn.Convad(A , A , 1 )
__snake_case: List[str] = False
__snake_case: Optional[int] = False
# only relevant if vae tiling is enabled
__snake_case: Any = self.config.sample_size
__snake_case: int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__snake_case: Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__snake_case: Optional[int] = 0.25
def UpperCAmelCase__ ( self : int , A : List[str] , A : Optional[Any]=False ):
if isinstance(A , (Encoder, Decoder) ):
__snake_case: str = value
def UpperCAmelCase__ ( self : str , A : bool = True ):
__snake_case: Union[str, Any] = use_tiling
def UpperCAmelCase__ ( self : Optional[int] ):
self.enable_tiling(A )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[str] = True
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[str] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Any = {}
def fn_recursive_add_processors(A : str , A : torch.nn.Module , A : Dict[str, AttentionProcessor] ):
if hasattr(A , """set_processor""" ):
__snake_case: List[Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , A , A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(A , A , A )
return processors
def UpperCAmelCase__ ( self : Optional[int] , A : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
__snake_case: Any = len(self.attn_processors.keys() )
if isinstance(A , A ) and len(A ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(A )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(A : str , A : torch.nn.Module , A : Optional[Any] ):
if hasattr(A , """set_processor""" ):
if not isinstance(A , A ):
module.set_processor(A )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , A , A )
for name, module in self.named_children():
fn_recursive_attn_processor(A , A , A )
def UpperCAmelCase__ ( self : List[str] ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def UpperCAmelCase__ ( self : Optional[Any] , A : torch.FloatTensor , A : bool = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(A , return_dict=A )
if self.use_slicing and x.shape[0] > 1:
__snake_case: List[Any] = [self.encoder(A ) for x_slice in x.split(1 )]
__snake_case: Optional[Any] = torch.cat(A )
else:
__snake_case: str = self.encoder(A )
__snake_case: Any = self.quant_conv(A )
__snake_case: Tuple = DiagonalGaussianDistribution(A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=A )
def UpperCAmelCase__ ( self : Tuple , A : torch.FloatTensor , A : bool = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(A , return_dict=A )
__snake_case: Optional[int] = self.post_quant_conv(A )
__snake_case: Union[str, Any] = self.decoder(A )
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
@apply_forward_hook
def UpperCAmelCase__ ( self : Tuple , A : torch.FloatTensor , A : bool = True ):
if self.use_slicing and z.shape[0] > 1:
__snake_case: Union[str, Any] = [self._decode(A ).sample for z_slice in z.split(1 )]
__snake_case: List[str] = torch.cat(A )
else:
__snake_case: int = self._decode(A ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=A )
def UpperCAmelCase__ ( self : Any , A : Tuple , A : int , A : List[Any] ):
__snake_case: int = min(a.shape[2] , b.shape[2] , A )
for y in range(A ):
__snake_case: Dict = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def UpperCAmelCase__ ( self : Union[str, Any] , A : Optional[Any] , A : List[str] , A : List[str] ):
__snake_case: Dict = min(a.shape[3] , b.shape[3] , A )
for x in range(A ):
__snake_case: Tuple = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
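# blend_v / blend_h (the two methods above) cross-fade neighbouring tiles
# linearly over `blend_extent` rows or columns: at offset y the result is
# (1 - y / blend_extent) of tile `a` plus (y / blend_extent) of tile `b`,
# which hides seams when tiled encode/decode stitches tiles back together.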
def UpperCAmelCase__ ( self : int , A : torch.FloatTensor , A : bool = True ):
__snake_case: List[str] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__snake_case: Dict = int(self.tile_latent_min_size * self.tile_overlap_factor )
__snake_case: Dict = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__snake_case: Optional[int] = []
for i in range(0 , x.shape[2] , A ):
__snake_case: Optional[int] = []
for j in range(0 , x.shape[3] , A ):
__snake_case: int = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__snake_case: Tuple = self.encoder(A )
__snake_case: Dict = self.quant_conv(A )
row.append(A )
rows.append(A )
__snake_case: Tuple = []
for i, row in enumerate(A ):
__snake_case: str = []
for j, tile in enumerate(A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__snake_case: Optional[Any] = self.blend_v(rows[i - 1][j] , A , A )
if j > 0:
__snake_case: Tuple = self.blend_h(row[j - 1] , A , A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(A , dim=3 ) )
__snake_case: Tuple = torch.cat(A , dim=2 )
__snake_case: Optional[int] = DiagonalGaussianDistribution(A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=A )
def UpperCAmelCase__ ( self : Union[str, Any] , A : torch.FloatTensor , A : bool = True ):
__snake_case: Optional[Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__snake_case: str = int(self.tile_sample_min_size * self.tile_overlap_factor )
__snake_case: int = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__snake_case: List[Any] = []
for i in range(0 , z.shape[2] , A ):
__snake_case: Optional[Any] = []
for j in range(0 , z.shape[3] , A ):
__snake_case: Dict = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__snake_case: Any = self.post_quant_conv(A )
__snake_case: Optional[Any] = self.decoder(A )
row.append(A )
rows.append(A )
__snake_case: Optional[Any] = []
for i, row in enumerate(A ):
__snake_case: Optional[Any] = []
for j, tile in enumerate(A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__snake_case: Tuple = self.blend_v(rows[i - 1][j] , A , A )
if j > 0:
__snake_case: List[str] = self.blend_h(row[j - 1] , A , A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(A , dim=3 ) )
__snake_case: Dict = torch.cat(A , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
def UpperCAmelCase__ ( self : List[Any] , A : torch.FloatTensor , A : bool = False , A : bool = True , A : Optional[torch.Generator] = None , ):
__snake_case: Optional[Any] = sample
__snake_case: Union[str, Any] = self.encode(A ).latent_dist
if sample_posterior:
__snake_case: Optional[Any] = posterior.sample(generator=A )
else:
__snake_case: Dict = posterior.mode()
__snake_case: Any = self.decode(A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
| 293
| 0
|
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
__UpperCAmelCase : Optional[int] = None
__UpperCAmelCase : Dict = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : Tuple = {
"vocab_file": {
"facebook/mbart-large-en-ro": (
"https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"
),
"facebook/mbart-large-cc25": (
"https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"
),
},
"tokenizer_file": {
"facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json",
"facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json",
},
}
__UpperCAmelCase : List[str] = {
"facebook/mbart-large-en-ro": 1_024,
"facebook/mbart-large-cc25": 1_024,
}
# fmt: off
__UpperCAmelCase : Dict = ["ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN"]
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = ["""input_ids""", """attention_mask"""]
lowerCAmelCase__ = MBartTokenizer
lowerCAmelCase__ = []
lowerCAmelCase__ = []
def __init__( self : Dict , A : Optional[int]=None , A : Optional[Any]=None , A : Tuple="<s>" , A : Tuple="</s>" , A : Optional[int]="</s>" , A : List[Any]="<s>" , A : Tuple="<unk>" , A : Tuple="<pad>" , A : List[str]="<mask>" , A : int=None , A : Tuple=None , A : Any=None , **A : Union[str, Any] , ):
# Mask token behaves like a normal word, i.e. it includes the space before it
__snake_case: Optional[int] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else mask_token
super().__init__(
vocab_file=A , tokenizer_file=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , src_lang=A , tgt_lang=A , additional_special_tokens=A , **A , )
__snake_case: Tuple = vocab_file
__snake_case: Optional[Any] = False if not self.vocab_file else True
__snake_case: Any = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({"""additional_special_tokens""": _additional_special_tokens} )
__snake_case: Tuple = {
lang_code: self.convert_tokens_to_ids(A ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
__snake_case: Dict = src_lang if src_lang is not None else """en_XX"""
__snake_case: List[str] = self.convert_tokens_to_ids(self._src_lang )
__snake_case: Any = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def UpperCAmelCase__ ( self : Tuple ):
return self._src_lang
@src_lang.setter
def UpperCAmelCase__ ( self : int , A : str ):
__snake_case: Dict = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def UpperCAmelCase__ ( self : str , A : List[int] , A : Optional[List[int]] = None ):
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def UpperCAmelCase__ ( self : Optional[int] , A : List[int] , A : Optional[List[int]] = None ):
__snake_case: Any = [self.sep_token_id]
__snake_case: Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self : List[Any] , A : Optional[Any] , A : str , A : Optional[str] , A : Optional[str] , **A : Any ):
if src_lang is None or tgt_lang is None:
raise ValueError("""Translation requires a `src_lang` and a `tgt_lang` for this model""" )
__snake_case: str = src_lang
__snake_case: Optional[Any] = self(A , add_special_tokens=A , return_tensors=A , **A )
__snake_case: List[Any] = self.convert_tokens_to_ids(A )
__snake_case: str = tgt_lang_id
return inputs
def UpperCAmelCase__ ( self : Optional[int] , A : List[str] , A : str = "en_XX" , A : Optional[List[str]] = None , A : str = "ro_RO" , **A : Tuple , ):
__snake_case: str = src_lang
__snake_case: List[str] = tgt_lang
return super().prepare_seqaseq_batch(A , A , **A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
return self.set_src_lang_special_tokens(self.src_lang )
def UpperCAmelCase__ ( self : List[Any] ):
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def UpperCAmelCase__ ( self : str , A : List[str] ):
__snake_case: int = self.convert_tokens_to_ids(A )
__snake_case: List[Any] = []
__snake_case: str = [self.eos_token_id, self.cur_lang_code]
__snake_case: Any = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case: Dict = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case: Any = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCAmelCase__ ( self : Dict , A : str ):
__snake_case: Any = self.convert_tokens_to_ids(A )
__snake_case: List[str] = []
__snake_case: Optional[int] = [self.eos_token_id, self.cur_lang_code]
__snake_case: int = self.convert_ids_to_tokens(self.prefix_tokens )
__snake_case: List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
__snake_case: Any = processors.TemplateProcessing(
single=prefix_tokens_str + ["""$A"""] + suffix_tokens_str , pair=prefix_tokens_str + ["""$A""", """$B"""] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def UpperCAmelCase__ ( self : Dict , A : str , A : Optional[str] = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
return
__snake_case: Optional[int] = os.path.join(
A , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A ):
copyfile(self.vocab_file , A )
return (out_vocab_file,)
| 362
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class SEWDConfig(PretrainedConfig):
    """Configuration class to store the configuration of a SEW-D model."""

    model_type = "sew-d"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        squeeze_factor=2,
        max_position_embeddings=512,
        position_buckets=256,
        share_att_key=True,
        relative_attention=True,
        pos_att_type=("p2c", "c2p"),
        norm_rel_ebd="layer_norm",
        hidden_act="gelu_python",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-7,
        feature_layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.max_position_embeddings = max_position_embeddings
        self.position_buckets = position_buckets
        self.share_att_key = share_att_key
        self.relative_attention = relative_attention
        self.norm_rel_ebd = norm_rel_ebd
        self.pos_att_type = list(pos_att_type)
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layer_norm_eps = layer_norm_eps
        self.feature_layer_norm_eps = feature_layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity
        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
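def _example_inputs_to_logits_ratio():
    # Hedged sketch (not part of the original file): `inputs_to_logits_ratio`
    # above is the product of the conv strides, i.e. how many raw audio
    # samples collapse into one logit frame. With the default strides this is
    # 5 * 2**6 = 320.
    conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
    ratio = functools.reduce(operator.mul, conv_stride, 1)
    assert ratio == 320
    return ratio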
| 293
| 0
|
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename a flattened Flax parameter key/tensor pair to its PyTorch form."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
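def _example_expert_kernel_transform():
    # Hedged, self-contained sketch of the expert-layer branch above: a 3-D
    # Flax "kernel" of shape (experts, d_in, d_out) becomes a PyTorch "weight"
    # of shape (experts, d_out, d_in). The shapes below are made up.
    key, tensor = rename_base_flax_keys(("mlp", "kernel"), torch.zeros(8, 512, 1_024))
    assert key == ("mlp", "weight")
    assert tuple(tensor.shape) == (8, 1_024, 512)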
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)
    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(
            ".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin"
        )
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
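def _example_index_layout():
    # Hedged sketch of the index file written above (parameter name and sizes
    # are made up): every parameter maps to the shard file that stores it.
    return {
        "metadata": {"total_size": 123_456_789},
        "weight_map": {"encoder.block.0.layer.0.weight": "pytorch_model-00001-of-00002.bin"},
    }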
if __name__ == "__main__":
__UpperCAmelCase : List[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCAmelCase : str = parser.parse_args()
shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
args.pytorch_dump_folder_path,
args.max_shard_size,
args.dtype,
)
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )
    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."
    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
| 363
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32 tensor-like nested list."""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
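def _example_floats_list():
    # Hedged sketch (not part of the original file): `floats_list((2, 3))`
    # yields a 2x3 nested list of floats drawn from the module-level RNG,
    # each value in [0, scale).
    values = floats_list((2, 3), scale=2.0)
    assert len(values) == 2 and len(values[0]) == 3
    assert all(0.0 <= v < 2.0 for row in values for v in row)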
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        spectrogram_length=2_048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44_100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)
    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_first = dict_first.pop("mel_filters")
        mel_second = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_first, mel_second))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44_100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44_100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_slice = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_slice, atol=1e-4))
| 293
| 0
|
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
__UpperCAmelCase : int = NewType("DataClass", Any)
__UpperCAmelCase : Dict = NewType("DataClassType", Any)
def string_to_bool(v) -> bool:
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )
def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """Map string representations back to the original choice values (e.g. for Enum or Literal fields)."""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)
def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    if metadata is None:
        # Important: don't use a dict as default param in the function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help
    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)
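# Hedged usage sketch for the `HfArg` helper above (field name, default and
# alias are illustrative, not from the original file): aliases/help land in
# the field metadata, which `_parse_dataclass_field` below turns into
# argparse options.
@dataclasses.dataclass
class _ExampleArguments:
    learning_rate: float = HfArg(default=3e-4, aliases=["--lr"], help="Peak learning rate.")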
class HfArgumentParser(ArgumentParser):
    """An `argparse.ArgumentParser` that generates arguments from dataclass type hints."""

    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)
    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(field.type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the current kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)
    def _add_dataclass_arguments(self, dtype: DataClassType):
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)
    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ):
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]

        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")
            return (*outputs,)
    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False):
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)
    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False):
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False):
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
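def _example_parse_into_dataclass():
    # Hedged end-to-end sketch (argv values are made up): parse CLI strings
    # straight into the `_ExampleArguments` dataclass defined above.
    parser = HfArgumentParser(_ExampleArguments)
    (example_args,) = parser.parse_args_into_dataclasses(args=["--lr", "1e-4"])
    return example_args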
| 364
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    """Pipeline for text-to-image generation that keeps images generated with the same seed similar across sizes."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
def UpperCAmelCase__ ( self : Optional[Any] , A : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case: Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def UpperCAmelCase__ ( self : str ):
self.enable_attention_slicing(A )
@torch.no_grad()
def __call__( self : List[str] , A : Union[str, List[str]] , A : int = 512 , A : int = 512 , A : int = 50 , A : float = 7.5 , A : Optional[Union[str, List[str]]] = None , A : Optional[int] = 1 , A : float = 0.0 , A : Optional[torch.Generator] = None , A : Optional[torch.FloatTensor] = None , A : Optional[str] = "pil" , A : bool = True , A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A : int = 1 , A : Optional[torch.FloatTensor] = None , **A : Optional[Any] , ):
if isinstance(A , A ):
__snake_case: int = 1
elif isinstance(A , A ):
__snake_case: Optional[Any] = len(A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(A )}.''' )
# get prompt text embeddings
__snake_case: Tuple = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case: Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case: List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case: Dict = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__snake_case: Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case: List[Any] = text_embeddings.shape
__snake_case: Tuple = text_embeddings.repeat(1 , A , 1 )
__snake_case: Dict = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case: List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case: List[str]
if negative_prompt is None:
__snake_case: Any = [""""""]
elif type(A ) is not type(A ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='''
f''' {type(A )}.''' )
elif isinstance(A , A ):
__snake_case: List[str] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case: str = negative_prompt
__snake_case: Any = text_input_ids.shape[-1]
__snake_case: Dict = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
__snake_case: Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case: Optional[Any] = uncond_embeddings.shape[1]
__snake_case: str = uncond_embeddings.repeat(A , A , 1 )
__snake_case: List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case: Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case: Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case: List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__snake_case: Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case: Any = torch.randn(
A , generator=A , device="""cpu""" , dtype=A ).to(self.device )
__snake_case: Tuple = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
__snake_case: Dict = torch.randn(
A , generator=A , device=self.device , dtype=A )
__snake_case: Optional[int] = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case: Optional[int] = latents_reference.to(self.device )
__snake_case: List[str] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__snake_case: int = (latents_shape[3] - latents_shape_reference[3]) // 2
__snake_case: Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
__snake_case: int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__snake_case: Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__snake_case: List[Any] = 0 if dx < 0 else dx
__snake_case: Dict = 0 if dy < 0 else dy
__snake_case: List[str] = max(-dx , 0 )
__snake_case: int = max(-dy , 0 )
__snake_case: List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case: str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case: Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case: Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case: int = {}
if accepts_eta:
__snake_case: Optional[Any] = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
__snake_case: str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case: Dict = self.scheduler.scale_model_input(A , A )
# predict the noise residual
__snake_case: List[Any] = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case: Any = noise_pred.chunk(2 )
__snake_case: Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case: str = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
__snake_case: Optional[int] = 1 / 0.1_8215 * latents
__snake_case: List[Any] = self.vae.decode(A ).sample
__snake_case: str = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case: Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__snake_case: List[Any] = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to(
self.device )
__snake_case , __snake_case: List[str] = self.safety_checker(
images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__snake_case: Optional[int] = None
if output_type == "pil":
__snake_case: Tuple = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
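def _example_classifier_free_guidance():
    # Hedged, self-contained sketch of the guidance step in `__call__` above:
    # noise = uncond + w * (text - uncond), with w the guidance scale. The
    # tensor shapes and the scale value here are illustrative only.
    noise_pred = torch.randn(2, 4, 64, 64)  # stacked [uncond; text] batch
    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
    guidance_scale = 7.5
    return noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)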
| 293
| 0
|
import math
import qiskit
def quantum_full_adder(
    input_1: int = 1, input_2: int = 1, carry_in: int = 1
) -> qiskit.result.counts.Counts:
    if (
        isinstance(input_1, str)
        or isinstance(input_2, str)
        or isinstance(carry_in, str)
    ):
        raise TypeError("inputs must be integers.")
    if (input_1 < 0) or (input_2 < 0) or (carry_in < 0):
        raise ValueError("inputs must be positive.")
    if (
        (math.floor(input_1) != input_1)
        or (math.floor(input_2) != input_2)
        or (math.floor(carry_in) != carry_in)
    ):
        raise ValueError("inputs must be exact integers.")
    if (input_1 > 2) or (input_2 > 2) or (carry_in > 2):
        raise ValueError("inputs must be less or equal to 2.")
    # build registers
    qr = qiskit.QuantumRegister(4, "qr")
    cr = qiskit.ClassicalRegister(2, "cr")
    # list the entries
    entry = [input_1, input_2, carry_in]
    quantum_circuit = qiskit.QuantumCircuit(qr, cr)
    for i in range(0, 3):
        if entry[i] == 2:
            quantum_circuit.h(i)  # for hadamard entries
        elif entry[i] == 1:
            quantum_circuit.x(i)  # for 1 entries
        elif entry[i] == 0:
            quantum_circuit.i(i)  # for 0 entries
    # build the circuit
    quantum_circuit.ccx(0, 1, 3)  # ccx = toffoli gate
    quantum_circuit.cx(0, 1)
    quantum_circuit.ccx(1, 2, 3)
    quantum_circuit.cx(1, 2)
    quantum_circuit.cx(0, 1)
    quantum_circuit.measure([2, 3], cr)  # measure the last two qbits
    backend = qiskit.Aer.get_backend("aer_simulator")
    job = qiskit.execute(quantum_circuit, backend, shots=1_000)
    return job.result().get_counts(quantum_circuit)
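def _example_classical_full_adder():
    # Worked classical reference for the circuit above (not part of the
    # original file): with input_1 = input_2 = carry_in = 1 the sum bit is 1
    # and the carry out is 1, i.e. 1 + 1 + 1 = 0b11.
    a, b, carry_in = 1, 1, 1
    sum_bit = a ^ b ^ carry_in
    carry_out = (a & b) | (carry_in & (a ^ b))
    assert (carry_out, sum_bit) == (1, 1)
    return carry_out, sum_bit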
if __name__ == "__main__":
print(f'Total sum count for state is: {quantum_full_adder(1, 1, 1)}')
| 365
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__UpperCAmelCase : Optional[int] = "\\n\n"
__UpperCAmelCase : Tuple = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
__UpperCAmelCase : Tuple = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Perplexity(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
    def _compute(self, input_texts, model_id, batch_size: int = 16, add_start_token: bool = True, device=None):
        if device is not None:
            assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
            if device == "gpu":
                device = "cuda"
        else:
            device = "cuda" if torch.cuda.is_available() else "cpu"

        model = AutoModelForCausalLM.from_pretrained(model_id)
        model = model.to(device)

        tokenizer = AutoTokenizer.from_pretrained(model_id)

        # if batch_size > 1 (which generally leads to padding being required), and
        # if there is not an already assigned pad_token, assign an existing
        # special token to also be the padding token
        if tokenizer.pad_token is None and batch_size > 1:
            existing_special_tokens = list(tokenizer.special_tokens_map_extended.values())
            # check that the model already has at least one special token defined
            assert (
                len(existing_special_tokens) > 0
            ), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
            # assign one of the special tokens to also be the pad token
            tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]})

        if add_start_token:
            # leave room for <BOS> token to be added:
            assert (
                tokenizer.bos_token is not None
            ), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
            max_tokenized_len = model.config.max_length - 1
        else:
            max_tokenized_len = model.config.max_length

        encodings = tokenizer(
            input_texts,
            add_special_tokens=False,
            padding=True,
            truncation=True,
            max_length=max_tokenized_len,
            return_tensors="pt",
            return_attention_mask=True,
        ).to(device)

        encoded_texts = encodings["input_ids"]
        attn_masks = encodings["attention_mask"]

        # check that each input is long enough:
        if add_start_token:
            assert torch.all(torch.ge(attn_masks.sum(1), 1)), "Each input text must be at least one token long."
        else:
            assert torch.all(
                torch.ge(attn_masks.sum(1), 2)
            ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."

        ppls = []
        loss_fct = CrossEntropyLoss(reduction="none")

        for start_index in logging.tqdm(range(0, len(encoded_texts), batch_size)):
            end_index = min(start_index + batch_size, len(encoded_texts))
            encoded_batch = encoded_texts[start_index:end_index]
            attn_mask = attn_masks[start_index:end_index]

            if add_start_token:
                bos_tokens_tensor = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0)).to(device)
                encoded_batch = torch.cat([bos_tokens_tensor, encoded_batch], dim=1)
                attn_mask = torch.cat(
                    [torch.ones(bos_tokens_tensor.size(), dtype=torch.int64).to(device), attn_mask], dim=1
                )

            labels = encoded_batch

            with torch.no_grad():
                out_logits = model(encoded_batch, attention_mask=attn_mask).logits

            shift_logits = out_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            shift_attention_mask_batch = attn_mask[..., 1:].contiguous()

            perplexity_batch = torch.exp(
                (loss_fct(shift_logits.transpose(1, 2), shift_labels) * shift_attention_mask_batch).sum(1)
                / shift_attention_mask_batch.sum(1)
            )

            ppls += perplexity_batch.tolist()

        return {"perplexities": ppls, "mean_perplexity": np.mean(ppls)}
| 293
| 0
|
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
level=os.environ.get("LOGLEVEL", "INFO").upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__="cpu") -> Dict:
__snake_case: List[str] = model_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE__).to(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = tokenizer_dict[model_name].from_pretrained(SCREAMING_SNAKE_CASE__)
if model_name in ["facebook/bart-base"]:
__snake_case: Any = 0
__snake_case: Any = None
__snake_case: str = 0
return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_name, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_name,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_name))

        new_onnx_file_name = remove_dup_initializers(os.path.abspath(onnx_file_name))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_name))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_name)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)
    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
| 366
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
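# The module above is a textbook use of the _LazyModule pattern: at import time
# only the _import_structure dict is built, and each submodule is loaded on
# first attribute access. A simplified sketch of the same idea follows; it is
# an illustration, not the actual _LazyModule implementation.

import importlib
import types


class LazyModuleSketch(types.ModuleType):
    """Loads a submodule only when one of its exported names is accessed."""

    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {
            attr: sub for sub, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        # only called when normal attribute lookup fails
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._attr_to_module[attr]}", self.__name__)
        return getattr(submodule, attr)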
| 293
| 0
|
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def A__ ( SCREAMING_SNAKE_CASE__ = "laptop") -> DataFrame:
    url = F'''https://www.amazon.in/laptop/s?k={product}'''
    header = {
        """User-Agent""": """Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36
    (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36""",
        """Accept-Language""": """en-US, en;q=0.5""",
    }
    soup = BeautifulSoup(requests.get(url , headers=header).text)
# Initialize a Pandas dataframe with the column titles
    data_frame = DataFrame(
columns=[
"""Product Title""",
"""Product Link""",
"""Current Price of the product""",
"""Product Rating""",
"""MRP of the product""",
"""Discount""",
])
# Loop through each entry and store them in the dataframe
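    # zip_longest pairs each search-result card with a rating row; when the two
    # lists differ in length, the shorter side is padded with None, so no
    # result card is silently dropped.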
for item, _ in zip_longest(
soup.find_all(
"""div""" , attrs={"""class""": """s-result-item""", """data-component-type""": """s-search-result"""} , ) , soup.find_all("""div""" , attrs={"""class""": """a-row a-size-base a-color-base"""}) , ):
try:
            product_title = item.h2.text
            product_link = """https://www.amazon.in/""" + item.h2.a["""href"""]
            product_price = item.find("""span""" , attrs={"""class""": """a-offscreen"""}).text
            try:
                product_rating = item.find("""span""" , attrs={"""class""": """a-icon-alt"""}).text
            except AttributeError:
                product_rating = """Not available"""
            try:
                product_mrp = (
                    """₹"""
                    + item.find(
                        """span""" , attrs={"""class""": """a-price a-text-price"""}).text.split("""₹""")[1]
                )
            except AttributeError:
                product_mrp = """"""
            try:
                discount = float(
                    (
                        (
                            float(product_mrp.strip("""₹""").replace(""",""" , """"""))
                            - float(product_price.strip("""₹""").replace(""",""" , """"""))
                        )
                        / float(product_mrp.strip("""₹""").replace(""",""" , """"""))
                    )
                    * 100)
            except ValueError:
                discount = float("""nan""")
except AttributeError:
pass
__snake_case: str = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
__snake_case: Optional[int] = """ """
__snake_case: Optional[Any] = """ """
data_frame.index += 1
return data_frame
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = "headphones"
get_amazon_product_data(product).to_csv(f'Amazon Product Data for {product}.csv')
| 367
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(A , """neck_hidden_sizes""" ) )
self.parent.assertTrue(hasattr(A , """num_attention_heads""" ) )
class __snake_case :
'''simple docstring'''
def __init__( self : int , A : str , A : Dict=13 , A : str=32 , A : Any=2 , A : Optional[Any]=3 , A : str=640 , A : Tuple=4 , A : Dict="silu" , A : List[Any]=3 , A : Any=32 , A : Any=0.1 , A : int=0.1 , A : Dict=0.1 , A : Optional[Any]=0.02 , A : List[Any]=True , A : Tuple=True , A : Any=10 , A : Optional[int]=None , ):
__snake_case: List[Any] = parent
__snake_case: Dict = batch_size
__snake_case: int = image_size
__snake_case: Tuple = patch_size
__snake_case: Tuple = num_channels
__snake_case: str = last_hidden_size
__snake_case: Dict = num_attention_heads
__snake_case: Dict = hidden_act
__snake_case: Tuple = conv_kernel_size
__snake_case: List[str] = output_stride
__snake_case: List[str] = hidden_dropout_prob
__snake_case: Optional[Any] = attention_probs_dropout_prob
__snake_case: int = classifier_dropout_prob
__snake_case: List[Any] = use_labels
__snake_case: Union[str, Any] = is_training
__snake_case: Union[str, Any] = num_labels
__snake_case: str = initializer_range
__snake_case: List[Any] = scope
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case: Tuple = None
__snake_case: Any = None
if self.use_labels:
__snake_case: Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case: str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case: Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase__ ( self : int ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : str , A : Optional[Any] , A : Any , A : Any , A : Union[str, Any] ):
__snake_case: List[Any] = MobileViTModel(config=A )
model.to(A )
model.eval()
__snake_case: int = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : str , A : List[Any] , A : Any , A : Any , A : int ):
__snake_case: str = self.num_labels
__snake_case: Optional[int] = MobileViTForImageClassification(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] , A : str , A : Optional[Any] , A : int , A : str ):
__snake_case: List[Any] = self.num_labels
__snake_case: Dict = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case: Tuple = model(A , labels=A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Tuple = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case: Any = config_and_inputs
__snake_case: Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[Any] = MobileViTModelTester(self )
__snake_case: str = MobileViTConfigTester(self , config_class=A , has_text_modality=A )
def UpperCAmelCase__ ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : List[Any] ):
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def UpperCAmelCase__ ( self : Dict ):
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def UpperCAmelCase__ ( self : Optional[Any] ):
pass
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case: Optional[Any] = model_class(A )
__snake_case: int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case: Optional[int] = [*signature.parameters.keys()]
__snake_case: List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase__ ( self : Dict ):
def check_hidden_states_output(A : List[Any] , A : int , A : Tuple ):
__snake_case: List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
__snake_case: str = model(**self._prepare_for_class(A , A ) )
__snake_case: Optional[int] = outputs.hidden_states
__snake_case: Any = 5
self.assertEqual(len(A ) , A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case: Union[str, Any] = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case: Optional[Any] = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case: Dict = True
check_hidden_states_output(A , A , A )
def UpperCAmelCase__ ( self : int ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case: List[Any] = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def A__ ( ) -> Optional[int]:
__snake_case: Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase__ ( self : Dict ):
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Tuple = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(A )
__snake_case: str = self.default_image_processor
__snake_case: Optional[Any] = prepare_img()
__snake_case: List[Any] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: Dict = model(**A )
# verify the logits
__snake_case: List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , A )
__snake_case: Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[str] = model.to(A )
__snake_case: Dict = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[Any] = prepare_img()
__snake_case: List[str] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: List[Any] = model(**A )
__snake_case: Optional[int] = outputs.logits
# verify the logits
__snake_case: Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , A )
__snake_case: Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Dict ):
__snake_case: int = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: str = model.to(A )
__snake_case: Optional[Any] = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[str] = prepare_img()
__snake_case: Optional[int] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: Dict = model(**A )
__snake_case: List[Any] = outputs.logits.detach().cpu()
__snake_case: List[str] = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(50, 60)] )
__snake_case: str = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , A )
__snake_case: int = image_processor.post_process_semantic_segmentation(outputs=A )
__snake_case: Tuple = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , A )
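# A minimal end-to-end sketch of the inference flow the integration tests above
# exercise: preprocess, forward pass, then resize the logits back to a target
# size with post_process_semantic_segmentation. It assumes network access to
# the Hub and reuses the checkpoint name from the tests.

import torch
from PIL import Image
from transformers import MobileViTForSemanticSegmentation, MobileViTImageProcessor

processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# one (height, width) label map per image, resized to the requested size
segmentation = processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
print(segmentation[0].shape)  # torch.Size([50, 60])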
| 293
| 0
|
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = (UniPCMultistepScheduler,)
lowerCAmelCase__ = (("""num_inference_steps""", 25),)
def UpperCAmelCase__ ( self : Any , **A : Optional[int] ):
__snake_case: int = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""solver_order""": 2,
"""solver_type""": """bh2""",
}
config.update(**A )
return config
def UpperCAmelCase__ ( self : str , A : Optional[Any]=0 , **A : List[str] ):
__snake_case: Dict = dict(self.forward_default_kwargs )
__snake_case: Tuple = kwargs.pop("""num_inference_steps""" , A )
__snake_case: Optional[Any] = self.dummy_sample
__snake_case: List[Any] = 0.1 * sample
__snake_case: List[Any] = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__snake_case: str = self.get_scheduler_config(**A )
__snake_case: Union[str, Any] = scheduler_class(**A )
scheduler.set_timesteps(A )
# copy over dummy past residuals
__snake_case: Union[str, Any] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A )
__snake_case: Union[str, Any] = scheduler_class.from_pretrained(A )
new_scheduler.set_timesteps(A )
# copy over dummy past residuals
__snake_case: str = dummy_past_residuals[: new_scheduler.config.solver_order]
            __snake_case , __snake_case: int = sample, sample
for t in range(A , time_step + scheduler.config.solver_order + 1 ):
__snake_case: List[str] = scheduler.step(A , A , A , **A ).prev_sample
__snake_case: str = new_scheduler.step(A , A , A , **A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : int , A : Dict=0 , **A : str ):
__snake_case: Tuple = dict(self.forward_default_kwargs )
__snake_case: Optional[Any] = kwargs.pop("""num_inference_steps""" , A )
__snake_case: Optional[Any] = self.dummy_sample
__snake_case: Any = 0.1 * sample
__snake_case: str = [residual + 0.2, residual + 0.15, residual + 0.10]
for scheduler_class in self.scheduler_classes:
__snake_case: List[Any] = self.get_scheduler_config()
__snake_case: Tuple = scheduler_class(**A )
scheduler.set_timesteps(A )
# copy over dummy past residuals (must be after setting timesteps)
__snake_case: int = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A )
__snake_case: List[str] = scheduler_class.from_pretrained(A )
            new_scheduler.set_timesteps(A )
            # copy over dummy past residuals (must be after setting timesteps)
__snake_case: Any = dummy_past_residuals[: new_scheduler.config.solver_order]
__snake_case: int = scheduler.step(A , A , A , **A ).prev_sample
__snake_case: int = new_scheduler.step(A , A , A , **A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1E-5, "Scheduler outputs are not identical"
def UpperCAmelCase__ ( self : str , A : str=None , **A : Any ):
if scheduler is None:
__snake_case: Any = self.scheduler_classes[0]
__snake_case: Any = self.get_scheduler_config(**A )
__snake_case: Optional[Any] = scheduler_class(**A )
__snake_case: Optional[Any] = self.scheduler_classes[0]
__snake_case: List[Any] = self.get_scheduler_config(**A )
__snake_case: Any = scheduler_class(**A )
__snake_case: Optional[int] = 10
__snake_case: List[Any] = self.dummy_model()
__snake_case: Tuple = self.dummy_sample_deter
scheduler.set_timesteps(A )
for i, t in enumerate(scheduler.timesteps ):
__snake_case: Tuple = model(A , A )
__snake_case: List[str] = scheduler.step(A , A , A ).prev_sample
return sample
def UpperCAmelCase__ ( self : str ):
__snake_case: int = dict(self.forward_default_kwargs )
__snake_case: Union[str, Any] = kwargs.pop("""num_inference_steps""" , A )
for scheduler_class in self.scheduler_classes:
__snake_case: Optional[Any] = self.get_scheduler_config()
__snake_case: Tuple = scheduler_class(**A )
__snake_case: int = self.dummy_sample
__snake_case: str = 0.1 * sample
if num_inference_steps is not None and hasattr(A , """set_timesteps""" ):
scheduler.set_timesteps(A )
elif num_inference_steps is not None and not hasattr(A , """set_timesteps""" ):
__snake_case: Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
__snake_case: Any = [residual + 0.2, residual + 0.15, residual + 0.10]
__snake_case: str = dummy_past_residuals[: scheduler.config.solver_order]
__snake_case: Dict = scheduler.timesteps[5]
__snake_case: Union[str, Any] = scheduler.timesteps[6]
__snake_case: Dict = scheduler.step(A , A , A , **A ).prev_sample
__snake_case: Optional[Any] = scheduler.step(A , A , A , **A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def UpperCAmelCase__ ( self : List[Any] ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
__snake_case: str = UniPCMultistepScheduler(**self.get_scheduler_config() )
__snake_case: str = self.full_loop(scheduler=A )
__snake_case: int = torch.mean(torch.abs(A ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
__snake_case: Optional[int] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
__snake_case: Dict = DEISMultistepScheduler.from_config(scheduler.config )
__snake_case: Optional[Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
__snake_case: Tuple = UniPCMultistepScheduler.from_config(scheduler.config )
__snake_case: Optional[int] = self.full_loop(scheduler=A )
__snake_case: Dict = torch.mean(torch.abs(A ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def UpperCAmelCase__ ( self : List[str] ):
for timesteps in [25, 50, 100, 999, 1_000]:
self.check_over_configs(num_train_timesteps=A )
def UpperCAmelCase__ ( self : Any ):
self.check_over_configs(thresholding=A )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=A , prediction_type=A , sample_max_value=A , solver_order=A , solver_type=A , )
def UpperCAmelCase__ ( self : Dict ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A )
def UpperCAmelCase__ ( self : List[Any] ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=A , solver_type=A , prediction_type=A , )
__snake_case: List[str] = self.full_loop(
solver_order=A , solver_type=A , prediction_type=A , )
assert not torch.isnan(A ).any(), "Samples have nan numbers"
def UpperCAmelCase__ ( self : Optional[Any] ):
self.check_over_configs(lower_order_final=A )
self.check_over_configs(lower_order_final=A )
def UpperCAmelCase__ ( self : List[str] ):
for num_inference_steps in [1, 2, 3, 5, 10, 50, 100, 999, 1_000]:
self.check_over_forward(num_inference_steps=A , time_step=0 )
def UpperCAmelCase__ ( self : int ):
__snake_case: str = self.full_loop()
__snake_case: Dict = torch.mean(torch.abs(A ) )
assert abs(result_mean.item() - 0.2464 ) < 1E-3
def UpperCAmelCase__ ( self : Any ):
__snake_case: List[Any] = self.full_loop(prediction_type="""v_prediction""" )
__snake_case: List[str] = torch.mean(torch.abs(A ) )
assert abs(result_mean.item() - 0.1014 ) < 1E-3
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[int] = self.scheduler_classes[0]
__snake_case: List[str] = self.get_scheduler_config(thresholding=A , dynamic_thresholding_ratio=0 )
__snake_case: List[str] = scheduler_class(**A )
__snake_case: Optional[Any] = 10
__snake_case: Union[str, Any] = self.dummy_model()
__snake_case: int = self.dummy_sample_deter.half()
scheduler.set_timesteps(A )
for i, t in enumerate(scheduler.timesteps ):
__snake_case: List[str] = model(A , A )
__snake_case: Union[str, Any] = scheduler.step(A , A , A ).prev_sample
        assert sample.dtype == torch.float16
def UpperCAmelCase__ ( self : List[str] , **A : List[str] ):
for scheduler_class in self.scheduler_classes:
__snake_case: str = self.get_scheduler_config(**A )
__snake_case: Optional[int] = scheduler_class(**A )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
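# A minimal sketch of the denoising loop the tests above exercise. Because the
# multistep schedulers here (UniPC, DPMSolver, DEIS) share a config format,
# they can be swapped via from_config(scheduler.config), as the scheduler-switch
# check above demonstrates. The model below is a stand-in for a real denoising
# network and is an assumption of this sketch.

import torch
from diffusers import UniPCMultistepScheduler

scheduler = UniPCMultistepScheduler(
    num_train_timesteps=1_000,
    beta_start=0.0001,
    beta_end=0.02,
    beta_schedule="linear",
    solver_order=2,
    solver_type="bh2",
)
scheduler.set_timesteps(10)

def model(sample: torch.Tensor, t: torch.Tensor) -> torch.Tensor:
    # stand-in for a real denoising network: predicts zero noise everywhere
    return torch.zeros_like(sample)

sample = torch.randn(1, 3, 8, 8)  # toy "noisy latents"
for t in scheduler.timesteps:
    residual = model(sample, t)
    sample = scheduler.step(residual, t, sample).prev_sample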
| 368
|
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: str = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetDownsampleBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Union[str, Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: Union[str, Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : List[str] ):
__snake_case , __snake_case: List[str] = super().prepare_init_args_and_inputs_for_common()
__snake_case: List[Any] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : int ):
__snake_case , __snake_case: Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Any ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[Any] ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : int ):
__snake_case: str = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: str = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[str] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaD # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
__snake_case: List[str] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Tuple = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: int = super().prepare_init_args_and_inputs_for_common()
__snake_case: int = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetUpsampleBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[int] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[Any] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A , include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Optional[Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : str ):
__snake_case: Union[str, Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Any = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : int ):
__snake_case: Any = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(A )
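# Note on the pattern above: every block test pins a 9-value slice of the
# block's output and delegates the shape and consistency checks to
# UNetBlockTesterMixin.test_output, so a numerical regression in any one block
# type shows up as a slice mismatch rather than silent drift.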
| 293
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Any = logging.get_logger(__name__)
__UpperCAmelCase : str = {"ctrl": "https://huggingface.co/ctrl/resolve/main/config.json"}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """ctrl"""
lowerCAmelCase__ = ["""past_key_values"""]
lowerCAmelCase__ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : List[str] , A : Any=246_534 , A : List[Any]=256 , A : Dict=1_280 , A : List[Any]=8_192 , A : List[str]=48 , A : Dict=16 , A : Tuple=0.1 , A : Dict=0.1 , A : Optional[int]=1E-6 , A : List[Any]=0.02 , A : Tuple=True , **A : List[str] , ):
__snake_case: List[Any] = vocab_size
__snake_case: int = n_positions
__snake_case: int = n_embd
__snake_case: str = n_layer
__snake_case: Dict = n_head
__snake_case: List[str] = dff
__snake_case: Union[str, Any] = resid_pdrop
__snake_case: int = embd_pdrop
__snake_case: List[Any] = layer_norm_epsilon
__snake_case: Any = initializer_range
__snake_case: str = use_cache
super().__init__(**A )
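# A small sketch of what the attribute_map above buys you: canonical names such
# as hidden_size resolve to the CTRL-specific fields (n_embd, n_layer, n_head),
# so generic code can read any config uniformly. Shown with the upstream
# transformers class name, CTRLConfig, which is an assumption of this sketch.

from transformers import CTRLConfig

config = CTRLConfig(n_embd=1_280, n_layer=48, n_head=16)
print(config.hidden_size)          # 1280 -- aliased to n_embd
print(config.num_hidden_layers)    # 48   -- aliased to n_layer
print(config.num_attention_heads)  # 16   -- aliased to n_head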
| 369
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase__ ( self : Dict ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = ort.SessionOptions()
__snake_case: List[Any] = False
return options
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__snake_case: Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__snake_case: List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
__snake_case: int = """A red cat sitting on a park bench"""
__snake_case: Any = np.random.RandomState(0 )
__snake_case: Optional[Any] = pipe(
prompt=A , image=A , mask_image=A , guidance_scale=7.5 , num_inference_steps=10 , generator=A , output_type="""np""" , )
__snake_case: List[Any] = output.images
__snake_case: str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__snake_case: Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__snake_case: Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__snake_case: Optional[int] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
__snake_case: List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=A , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
__snake_case: Optional[int] = """A red cat sitting on a park bench"""
__snake_case: Dict = np.random.RandomState(0 )
__snake_case: Optional[Any] = pipe(
prompt=A , image=A , mask_image=A , guidance_scale=7.5 , num_inference_steps=20 , generator=A , output_type="""np""" , )
__snake_case: List[str] = output.images
__snake_case: str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__snake_case: Union[str, Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
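# A distilled sketch of the inpainting call the tests above make: the pipeline
# takes the source image plus a binary mask (white = repaint) and regenerates
# only the masked region according to the prompt. Provider and session options
# are only needed on GPU; np.random.RandomState gives the ONNX pipelines a
# reproducible seed. This assumes network access to the Hub.

import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipeline
from diffusers.utils import load_image

pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
    "runwayml/stable-diffusion-inpainting", revision="onnx"
)
image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/in_paint/overture-creations-5sI6fQgYIuo.png"
)
mask = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
)
result = pipe(
    prompt="A red cat sitting on a park bench",
    image=image,
    mask_image=mask,
    num_inference_steps=10,
    generator=np.random.RandomState(0),
    output_type="np",
).images[0]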
| 293
| 0
|
import warnings
from diffusers import StableDiffusionImg2ImgPipeline  # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
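# The deprecation above means new code should import the pipeline directly:
#
#     from diffusers import StableDiffusionImg2ImgPipeline
#
# instead of going through this shim module.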
| 370
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def quantum_fourier_transform(number_of_qubits: int = 3) -> qiskit.result.counts.Counts:
    if isinstance(number_of_qubits , str):
        raise TypeError("""number of qubits must be an integer.""")
    if number_of_qubits <= 0:
        raise ValueError("""number of qubits must be > 0.""")
    if math.floor(number_of_qubits) != number_of_qubits:
        raise ValueError("""number of qubits must be an exact integer.""")
    if number_of_qubits > 10:
        raise ValueError("""number of qubits too large to simulate (>10).""")
    qr = QuantumRegister(number_of_qubits , """qr""")
    cr = ClassicalRegister(number_of_qubits , """cr""")
    quantum_circuit = QuantumCircuit(qr , cr)
    counter = number_of_qubits
    for i in range(counter):
        quantum_circuit.h(number_of_qubits - i - 1)
        counter -= 1
        for j in range(counter):
            quantum_circuit.cp(np.pi / 2 ** (counter - j) , j , counter)
    for k in range(number_of_qubits // 2):
        quantum_circuit.swap(k , number_of_qubits - k - 1)
    # measure all the qubits
    quantum_circuit.measure(qr , cr)
    # simulate with 10000 shots
    backend = Aer.get_backend("""qasm_simulator""")
    job = execute(quantum_circuit , backend , shots=1_0000)
    return job.result().get_counts(quantum_circuit)
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
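# Sanity check on the output: applied to the all-zeros initial state, the QFT
# produces a uniform superposition in the computational basis, so for 3 qubits
# the 10000 shots should split roughly evenly (~1250 counts) across all 8
# bitstrings.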
| 293
| 0
|
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class __snake_case :
'''simple docstring'''
lowerCAmelCase__ = None
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: str = self.feature_extraction_class(**self.feat_extract_dict )
__snake_case: List[str] = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] , A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: Any = os.path.join(A , """feat_extract.json""" )
feat_extract_first.to_json_file(A )
__snake_case: int = self.feature_extraction_class.from_json_file(A )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: List[str] = feat_extract_first.save_pretrained(A )[0]
check_json_file_has_correct_format(A )
__snake_case: int = self.feature_extraction_class.from_pretrained(A )
self.assertEqual(feat_extract_second.to_dict() , feat_extract_first.to_dict() )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Any = self.feature_extraction_class()
self.assertIsNotNone(A )
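# The tests above all assert the same invariant: serializing a feature
# extractor and loading it back must reproduce to_dict() exactly. A minimal
# sketch of that round trip with a concrete class (Wav2Vec2FeatureExtractor is
# an assumption here; any feature extractor class would do):

import tempfile

from transformers import Wav2Vec2FeatureExtractor

original = Wav2Vec2FeatureExtractor()
with tempfile.TemporaryDirectory() as tmpdir:
    original.save_pretrained(tmpdir)
    restored = Wav2Vec2FeatureExtractor.from_pretrained(tmpdir)

assert restored.to_dict() == original.to_dict()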
| 371
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 293
| 0
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__UpperCAmelCase : Tuple = {"configuration_reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Any = ["ReformerTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = ["ReformerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_reformer"] = [
"REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"ReformerAttention",
"ReformerForMaskedLM",
"ReformerForQuestionAnswering",
"ReformerForSequenceClassification",
"ReformerLayer",
"ReformerModel",
"ReformerModelWithLMHead",
"ReformerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 350
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : str , *A : Dict , A : Optional[int]=None , A : Tuple=None , **A : Optional[int] ):
super().__init__(*A , **A )
__snake_case: List[Any] = eval_examples
__snake_case: str = post_process_function
def UpperCAmelCase__ ( self : List[Any] , A : Dict=None , A : int=None , A : List[Any]=None , A : str = "eval" ):
__snake_case: int = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case: Any = self.get_eval_dataloader(A )
__snake_case: Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
        # Temporarily disable metric computation; we will do it in the loop here.
__snake_case: Union[str, Any] = self.compute_metrics
__snake_case: List[str] = None
__snake_case: Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__snake_case: Tuple = time.time()
try:
__snake_case: Any = eval_loop(
A , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , )
finally:
__snake_case: Optional[int] = compute_metrics
__snake_case: Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__snake_case: List[str] = self.post_process_function(A , A , output.predictions )
__snake_case: List[Any] = self.compute_metrics(A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__snake_case: str = metrics.pop(A )
metrics.update(output.metrics )
else:
__snake_case: List[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(A )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__snake_case: str = self.callback_handler.on_evaluate(self.args , self.state , self.control , A )
return metrics
def UpperCAmelCase__ ( self : Optional[Any] , A : List[Any] , A : List[str] , A : str=None , A : str = "test" ):
__snake_case: Optional[Any] = self.get_test_dataloader(A )
        # Temporarily disable metric computation; we will do it in the loop here.
__snake_case: Optional[int] = self.compute_metrics
__snake_case: List[Any] = None
__snake_case: str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__snake_case: Dict = time.time()
try:
__snake_case: str = eval_loop(
A , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , )
finally:
__snake_case: List[Any] = compute_metrics
__snake_case: Dict = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case: Union[str, Any] = self.post_process_function(A , A , output.predictions , """predict""" )
__snake_case: str = self.compute_metrics(A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__snake_case: List[str] = metrics.pop(A )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A )
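# The contract assumed by the trainer above: post_process_function maps raw
# model predictions back to answer text before metrics are computed. A sketch
# of the expected signature, kept as a comment because the concrete names are
# illustrative rather than from this source:
#
# def post_processing_function(examples, features, predictions, stage="eval"):
#     # convert start/end logits into text answers keyed by example id
#     ...
#     return EvalPrediction(predictions=formatted_predictions, label_ids=references)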
| 293
| 0
|
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
__UpperCAmelCase : Union[str, Any] = " def __init__(self, config):\n super().__init__()\n self.transform = BertPredictionHeadTransform(config)\n\n # The output weights are the same as the input embeddings, but there is\n # an output-only bias for each token.\n self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n self.bias = nn.Parameter(torch.zeros(config.vocab_size))\n\n # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`\n self.decoder.bias = self.bias\n\n def forward(self, hidden_states):\n hidden_states = self.transform(hidden_states)\n hidden_states = self.decoder(hidden_states)\n return hidden_states\n"
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Union[str, Any] = tempfile.mkdtemp()
os.makedirs(os.path.join(self.transformer_dir , """models/bert/""" ) )
__snake_case: Union[str, Any] = self.transformer_dir
shutil.copy(
os.path.join(A , """src/transformers/models/bert/modeling_bert.py""" ) , os.path.join(self.transformer_dir , """models/bert/modeling_bert.py""" ) , )
def UpperCAmelCase__ ( self : int ):
__snake_case: str = """src/transformers"""
shutil.rmtree(self.transformer_dir )
def UpperCAmelCase__ ( self : int , A : str , A : Optional[Any] , A : Dict , A : Optional[Any]=None ):
__snake_case: Any = comment + f'''\nclass {class_name}(nn.Module):\n''' + class_code
if overwrite_result is not None:
__snake_case: List[Any] = comment + f'''\nclass {class_name}(nn.Module):\n''' + overwrite_result
__snake_case: Any = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 )
__snake_case: Any = black.format_str(A , mode=A )
__snake_case: List[Any] = os.path.join(self.transformer_dir , """new_code.py""" )
with open(A , """w""" , newline="""\n""" ) as f:
f.write(A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=A )
with open(A , """r""" ) as f:
self.assertTrue(f.read() , A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[str] = check_copies.find_code_in_transformers("""models.bert.modeling_bert.BertLMPredictionHead""" )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Dict ):
# Base copy consistency
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead""" , """BertLMPredictionHead""" , A , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , re.sub("""Bert""" , """TestModel""" , A ) , )
# Copy consistency with a really long name
__snake_case: Dict = """TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f'''# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}''' , f'''{long_class_name}LMPredictionHead''' , re.sub("""Bert""" , A , A ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel""" , """TestModelLMPredictionHead""" , A , overwrite_result=re.sub("""Bert""" , """TestModel""" , A ) , )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[Any] = check_copies.LOCALIZED_READMES["""README_zh-hans.md"""]
__snake_case: Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut.\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (from HuggingFace),"""
""" released together with the paper [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) by Victor Sanh, Lysandre Debut and Thomas Wolf. The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)**"""
""" (from Google Research/Stanford University) released with the paper [ELECTRA: Pre-training text encoders"""
""" as discriminators rather than generators](https://arxiv.org/abs/2003.10555) by Kevin Clark, Minh-Thang"""
""" Luong, Quoc V. Le, Christopher D. Manning."""
)
__snake_case: Any = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
__snake_case: List[Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n1."""
""" **[DistilBERT](https://huggingface.co/transformers/model_doc/distilbert.html)** (来自 HuggingFace) 伴随论文"""
""" [DistilBERT, a distilled version of BERT: smaller, faster, cheaper and"""
""" lighter](https://arxiv.org/abs/1910.01108) 由 Victor Sanh, Lysandre Debut and Thomas Wolf 发布。 The same"""
""" method has been applied to compress GPT2 into"""
""" [DistilGPT2](https://github.com/huggingface/transformers/tree/main/examples/distillation), RoBERTa into"""
""" [DistilRoBERTa](https://github.com/huggingface/transformers/tree/main/examples/distillation),"""
""" Multilingual BERT into"""
""" [DistilmBERT](https://github.com/huggingface/transformers/tree/main/examples/distillation) and a German"""
""" version of DistilBERT.\n1. **[ELECTRA](https://huggingface.co/transformers/model_doc/electra.html)** (来自"""
""" Google Research/Stanford University) 伴随论文 [ELECTRA: Pre-training text encoders as discriminators rather"""
""" than generators](https://arxiv.org/abs/2003.10555) 由 Kevin Clark, Minh-Thang Luong, Quoc V. Le,"""
""" Christopher D. Manning 发布。\n"""
)
__snake_case: int = check_copies.convert_to_localized_md(
A , A , localized_readme["""format_model_list"""] )
self.assertFalse(A )
self.assertEqual(A , A )
__snake_case: Union[str, Any] = check_copies.convert_to_localized_md(
A , A , localized_readme["""format_model_list"""] )
        # Check that the number of models matches the number in README.md after conversion.
self.assertTrue(A )
__snake_case: Tuple = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (from Google Research and the"""
""" Toyota Technological Institute at Chicago) released with the paper [ALBERT: A Lite BERT for"""
""" Self-supervised Learning of Language Representations](https://arxiv.org/abs/1909.11942), by Zhenzhong"""
""" Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut."""
)
__snake_case: Union[str, Any] = (
"""1. **[ALBERT](https://huggingface.co/transformers/main/model_doc/albert.html)** (来自 Google Research and"""
""" the Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
__snake_case: Any = (
"""1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** (来自 Google Research and the"""
""" Toyota Technological Institute at Chicago) 伴随论文 [ALBERT: A Lite BERT for Self-supervised Learning of"""
""" Language Representations](https://arxiv.org/abs/1909.11942), 由 Zhenzhong Lan, Mingda Chen, Sebastian"""
""" Goodman, Kevin Gimpel, Piyush Sharma, Radu Soricut 发布。\n"""
)
__snake_case: str = check_copies.convert_to_localized_md(
A , A , localized_readme["""format_model_list"""] )
# Check if the model link is synchronized.
self.assertEqual(A , A )
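# Illustrative sketch (not part of the original test file): a minimal stand-in for what
# `check_copies.convert_to_localized_md` is assumed to do -- append English model entries
# missing from a localized README list and report whether the lists already matched.
# The helper name, the simplified `fmt` fields, and the return convention are assumptions.
import re
def convert_to_localized_md_sketch(en_model_list: str, localized_model_list: str, fmt: str):
    """Return (readmes_match, converted_list)."""
    en_entries = re.findall(r"\*\*\[(.+?)\]\((.+?)\)\*\*", en_model_list)
    localized_titles = set(re.findall(r"\*\*\[(.+?)\]", localized_model_list))
    additions = [fmt.format(title=t, model_link=link) for t, link in en_entries if t not in localized_titles]
    return not additions, localized_model_list + "".join(additions)
# usage: an empty localized list gains the missing ALBERT entry and reports a mismatch
match, converted = convert_to_localized_md_sketch(
    "1. **[ALBERT](https://huggingface.co/transformers/model_doc/albert.html)** ...",
    "",
    "1. **[{title}]({model_link})**\n",
)
print(match)  # False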
| 351
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : int = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """rwkv"""
lowerCAmelCase__ = {"""max_position_embeddings""": """context_length"""}
def __init__( self : Dict , A : List[Any]=50_277 , A : List[Any]=1_024 , A : Union[str, Any]=4_096 , A : Tuple=32 , A : List[Any]=None , A : Tuple=None , A : Tuple=1E-5 , A : int=0 , A : Optional[int]=0 , A : Dict=6 , A : Dict=False , A : int=True , **A : List[Any] , ):
__snake_case: Tuple = vocab_size
__snake_case: Any = context_length
__snake_case: Dict = hidden_size
__snake_case: Dict = num_hidden_layers
__snake_case: Union[str, Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
__snake_case: str = intermediate_size if intermediate_size is not None else 4 * hidden_size
__snake_case: Any = layer_norm_epsilon
__snake_case: int = rescale_every
__snake_case: str = use_cache
__snake_case: Dict = bos_token_id
__snake_case: Union[str, Any] = eos_token_id
super().__init__(
tie_word_embeddings=A , bos_token_id=A , eos_token_id=A , **A )
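# Usage sketch for the config above. Upstream this class is `transformers.RwkvConfig`
# (an assumption based on the checkpoint map); requires a transformers release with
# RWKV support.
from transformers import RwkvConfig, RwkvModel
config = RwkvConfig(context_length=1_024, hidden_size=512, num_hidden_layers=6)
# unset sizes fall back to hidden_size and 4 * hidden_size, as in __init__ above
assert config.attention_hidden_size == 512
assert config.intermediate_size == 4 * 512
model = RwkvModel(config)  # randomly initialized, untrained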
| 293
| 0
|
"""simple docstring"""
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will then be used as "stas/tiny-wmt19-en-ru"
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__UpperCAmelCase : List[Any] = "tiny-wmt19-en-ru"
# Build
# borrowed from a test
__UpperCAmelCase : List[Any] = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"w</w>",
"r</w>",
"t</w>",
"lo",
"low",
"er</w>",
"low</w>",
"lowest</w>",
"newer</w>",
"wider</w>",
"<unk>",
]
__UpperCAmelCase : str = dict(zip(vocab, range(len(vocab))))
__UpperCAmelCase : Tuple = ["l o 123", "lo w 1456", "e r</w> 1789", ""]
with tempfile.TemporaryDirectory() as tmpdirname:
__UpperCAmelCase : Optional[Any] = Path(tmpdirname)
__UpperCAmelCase : List[Any] = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
__UpperCAmelCase : str = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
__UpperCAmelCase : int = build_dir / VOCAB_FILES_NAMES["merges_file"]
with open(src_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, "w") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, "w") as fp:
fp.write("\n".join(merges))
__UpperCAmelCase : Optional[int] = FSMTTokenizer(
langs=["en", "ru"],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__UpperCAmelCase : Union[str, Any] = FSMTConfig(
langs=["ru", "en"],
src_vocab_size=1_000,
tgt_vocab_size=1_000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__UpperCAmelCase : Tuple = FSMTForConditionalGeneration(config)
print(f'num of params {tiny_model.num_parameters()}')
# Test
__UpperCAmelCase : str = tokenizer(["Making tiny model"], return_tensors="pt")
__UpperCAmelCase : Any = tiny_model(**batch)
print("test output:", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f'Generated {mname_tiny}')
# Upload
# transformers-cli upload tiny-wmt19-en-ru
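# Consuming the artifact afterwards is ordinary hub usage; sketch assuming the repo name
# above ("stas/tiny-wmt19-en-ru") has been uploaded and is reachable:
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
tok = FSMTTokenizer.from_pretrained("stas/tiny-wmt19-en-ru")
model = FSMTForConditionalGeneration.from_pretrained("stas/tiny-wmt19-en-ru")
batch = tok(["Making tiny model"], return_tensors="pt")
out = model.generate(**batch, max_length=8)  # output is meaningless: weights are random
print(tok.batch_decode(out, skip_special_tokens=True))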
| 352
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase : str = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__UpperCAmelCase : Any = 250_004
__UpperCAmelCase : List[str] = 250_020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = MBartaaTokenizer
lowerCAmelCase__ = MBartaaTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def UpperCAmelCase__ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case: Optional[int] = MBartaaTokenizer(A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Any = """<s>"""
__snake_case: Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(A ) , 1_054 )
def UpperCAmelCase__ ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_054 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Dict = MBartaaTokenizer(A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=A )
__snake_case: int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__snake_case: Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
__snake_case: List[Any] = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case: int = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
# fmt: off
__snake_case: List[str] = {"""input_ids""": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def UpperCAmelCase__ ( self : Union[str, Any] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
__snake_case: Any = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case: Optional[int] = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: Union[str, Any] = self.tokenizer_class.from_pretrained(A , **A )
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Tuple = tokenizer_r.save_pretrained(A )
__snake_case: Optional[int] = tokenizer_p.save_pretrained(A )
                # Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__snake_case: Dict = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: Tuple = tokenizer_r.from_pretrained(A )
__snake_case: Optional[Any] = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
__snake_case: Tuple = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: List[str] = tokenizer_p.save_pretrained(A )
                # Checks it saves with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: List[Any] = tokenizer_r.from_pretrained(A )
__snake_case: Dict = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: Dict = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__snake_case: Any = tokenizer_r.from_pretrained(A )
__snake_case: Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = """facebook/mbart-large-50-one-to-many-mmt"""
lowerCAmelCase__ = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
lowerCAmelCase__ = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
lowerCAmelCase__ = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def UpperCAmelCase__ ( cls : int ):
__snake_case: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
__snake_case: str = 1
return cls
def UpperCAmelCase__ ( self : Any ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250_020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 250_038 )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.assertIn(A , self.tokenizer.all_special_ids )
__snake_case: Dict = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
__snake_case: str = self.tokenizer.decode(A , skip_special_tokens=A )
__snake_case: Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
self.assertEqual(A , A )
self.assertNotIn(self.tokenizer.eos_token , A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[str] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , A )
__snake_case: Union[str, Any] = 10
__snake_case: List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0]
self.assertEqual(ids[0] , A )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(A ) , A )
def UpperCAmelCase__ ( self : Tuple ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250_053, 250_001] )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[Any] = tempfile.mkdtemp()
__snake_case: Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
__snake_case: Union[str, Any] = MBartaaTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A )
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" )
__snake_case: List[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: int = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__snake_case: Optional[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(A , A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__snake_case: List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase__ ( self : str ):
__snake_case: List[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" )
__snake_case: Union[str, Any] = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" )
__snake_case: Dict = targets["""input_ids"""]
__snake_case: Any = shift_tokens_right(A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(A ) , {
# en_XX, A, test, EOS
"""input_ids""": [[250_004, 62, 3_034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250_001,
} , )
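# Minimal public-API sketch of the translation setup these tests exercise
# (MBart50Tokenizer is the upstream name; hub/network access assumed):
from transformers import MBart50Tokenizer
tok = MBart50Tokenizer.from_pretrained(
    "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO"
)
ids = tok(["UN Chief Says There Is No Military Solution in Syria"])["input_ids"][0]
print(ids[0], ids[-1])  # 250004 (the en_XX language code) ... 2 (</s>)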
| 293
| 0
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : List[Any] = {
"sayakpaul/vit-msn-base": "https://huggingface.co/sayakpaul/vit-msn-base/resolve/main/config.json",
# See all ViT MSN models at https://huggingface.co/models?filter=vit_msn
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """vit_msn"""
def __init__( self : List[Any] , A : Optional[Any]=768 , A : Any=12 , A : Dict=12 , A : str=3_072 , A : int="gelu" , A : List[Any]=0.0 , A : Any=0.0 , A : List[str]=0.02 , A : Dict=1E-06 , A : List[str]=224 , A : Dict=16 , A : Any=3 , A : List[str]=True , **A : List[Any] , ):
super().__init__(**A )
__snake_case: Union[str, Any] = hidden_size
__snake_case: List[Any] = num_hidden_layers
__snake_case: List[Any] = num_attention_heads
__snake_case: Dict = intermediate_size
__snake_case: List[Any] = hidden_act
__snake_case: Dict = hidden_dropout_prob
__snake_case: int = attention_probs_dropout_prob
__snake_case: Optional[Any] = initializer_range
__snake_case: List[str] = layer_norm_eps
__snake_case: List[str] = image_size
__snake_case: int = patch_size
__snake_case: Optional[int] = num_channels
__snake_case: List[str] = qkv_bias
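# Usage sketch; upstream these are `transformers.ViTMSNConfig` / `ViTMSNModel`
# (inferred from the checkpoint map above).
from transformers import ViTMSNConfig, ViTMSNModel
config = ViTMSNConfig(image_size=224, patch_size=16, hidden_size=384,
                      num_hidden_layers=6, num_attention_heads=6, intermediate_size=1_536)
num_patches = (config.image_size // config.patch_size) ** 2
print(num_patches)  # 196 patch tokens (plus one CLS token inside the model)
model = ViTMSNModel(config)  # randomly initialized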
| 353
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__UpperCAmelCase : str = logging.get_logger(__name__)
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : Any , A : int , A : int , A : float , **A : Optional[int] ):
__snake_case: List[str] = feature_size
__snake_case: Optional[int] = sampling_rate
__snake_case: Any = padding_value
__snake_case: Dict = kwargs.pop("""padding_side""" , """right""" )
__snake_case: Union[str, Any] = kwargs.pop("""return_attention_mask""" , A )
super().__init__(**A )
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , A : Union[bool, str, PaddingStrategy] = True , A : Optional[int] = None , A : bool = False , A : Optional[int] = None , A : Optional[bool] = None , A : Optional[Union[str, TensorType]] = None , ):
        # If we have a list of dicts, let's convert it into a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(A , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__snake_case: Optional[int] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
        # The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f''' to this method that includes {self.model_input_names[0]}, but you provided'''
f''' {list(processed_features.keys() )}''' )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
__snake_case: Any = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(A ) == 0:
if return_attention_mask:
__snake_case: Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__snake_case: int = required_input[0]
if isinstance(A , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__snake_case: Optional[int] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(A ):
__snake_case: Optional[int] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(A ):
__snake_case: str = """tf"""
elif is_torch_tensor(A ):
__snake_case: str = """pt"""
elif isinstance(A , (int, float, list, tuple, np.ndarray) ):
__snake_case: List[str] = """np"""
else:
raise ValueError(
f'''type of {first_element} unknown: {type(A )}. '''
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__snake_case: List[Any] = to_numpy(A )
else:
__snake_case: Union[str, Any] = [to_numpy(A ) for v in value]
# Convert padding_strategy in PaddingStrategy
__snake_case: Union[str, Any] = self._get_padding_strategies(padding=A , max_length=A )
__snake_case: Any = processed_features[self.model_input_names[0]]
__snake_case: int = len(A )
if not all(len(A ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__snake_case: Union[str, Any] = []
for i in range(A ):
__snake_case: List[Any] = {k: v[i] for k, v in processed_features.items()}
# truncation
__snake_case: Tuple = self._truncate(
A , max_length=A , pad_to_multiple_of=A , truncation=A , )
truncated_inputs.append(A )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__snake_case: Optional[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__snake_case: List[str] = PaddingStrategy.MAX_LENGTH
__snake_case: List[Any] = {}
for i in range(A ):
# padding
__snake_case: Any = self._pad(
truncated_inputs[i] , max_length=A , padding_strategy=A , pad_to_multiple_of=A , return_attention_mask=A , )
for key, value in outputs.items():
if key not in batch_outputs:
__snake_case: Optional[Any] = []
                if value.dtype is np.dtype(np.float64 ):
                    __snake_case: str = value.astype(np.float32 )
batch_outputs[key].append(A )
return BatchFeature(A , tensor_type=A )
def UpperCAmelCase__ ( self : int , A : Union[Dict[str, np.ndarray], BatchFeature] , A : Optional[int] = None , A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , A : Optional[int] = None , A : Optional[bool] = None , ):
__snake_case: List[Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__snake_case: List[str] = len(A )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__snake_case: List[str] = np.ones(len(A ) , dtype=np.intaa )
if needs_to_be_padded:
__snake_case: Any = max_length - len(A )
if self.padding_side == "right":
if return_attention_mask:
__snake_case: Optional[int] = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__snake_case: Any = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__snake_case: Union[str, Any] = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__snake_case: Dict = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__snake_case: Union[str, Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__snake_case: str = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[Dict[str, np.ndarray], BatchFeature] , A : Optional[int] = None , A : Optional[int] = None , A : Optional[bool] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Tuple = len(A ) > max_length
if needs_to_be_truncated:
__snake_case: List[Any] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__snake_case: int = processed_features["""attention_mask"""][:max_length]
return processed_features
    def UpperCAmelCase__ ( self : int , padding : Union[bool, str, PaddingStrategy] = False , max_length : Optional[int] = None ):
        # Get padding strategy and resolve it to a PaddingStrategy member
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding , PaddingStrategy ):
                padding_strategy = PaddingStrategy(padding )
            elif isinstance(padding , PaddingStrategy ):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
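# The `pad` method above is designed to work as a DataLoader collate_fn; a concrete
# sketch with Wav2Vec2FeatureExtractor, one subclass of this base class:
import numpy as np
from transformers import Wav2Vec2FeatureExtractor
fe = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16_000, padding_value=0.0)
features = [
    {"input_values": np.random.randn(800).astype(np.float32)},
    {"input_values": np.random.randn(1_000).astype(np.float32)},
]
batch = fe.pad(features, padding=True, return_tensors="np", return_attention_mask=True)
print(batch["input_values"].shape)       # (2, 1000) -- padded to the longest example
print(batch["attention_mask"].sum(-1))   # [ 800 1000]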
| 293
| 0
|
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: str = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetDownsampleBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Union[str, Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: Union[str, Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[str] = super().prepare_init_args_and_inputs_for_common()
__snake_case: List[Any] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : int ):
__snake_case: Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Any ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[Any] ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : int ):
__snake_case: str = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: str = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[str] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaD # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
__snake_case: List[str] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Tuple = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case: int = super().prepare_init_args_and_inputs_for_common()
__snake_case: int = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : str ):
__snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetUpsampleBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[int] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[Any] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A , include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : str ):
__snake_case: Union[str, Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Any = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : int ):
__snake_case: Any = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(A )
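# Standalone sketch of the flow each test above follows, written for DownBlock2D (the
# diffusers class that `DownBlockaD` is assumed to stand in for); shapes are illustrative.
import torch
from diffusers.models.unet_2d_blocks import DownBlock2D
block = DownBlock2D(in_channels=32, out_channels=32, temb_channels=128)
sample = torch.randn(4, 32, 32, 32)   # (batch, channels, height, width)
temb = torch.randn(4, 128)            # timestep embedding
hidden_states, output_states = block(sample, temb)
print(hidden_states.shape)            # torch.Size([4, 32, 16, 16]) -- downsampled by 2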
| 354
|
from __future__ import annotations
import numpy as np
def relu( vector: np.ndarray ) -> np.ndarray:
    return np.maximum(0 , vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
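# Companion sketch (not in the original file): the subgradient used when
# backpropagating through ReLU, with the value at 0 conventionally set to 0.
def relu_derivative(vector) -> np.ndarray:
    return (np.asarray(vector) > 0).astype(float)
print(relu_derivative([-1, 0, 5]))  # --> [0. 0. 1.]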
| 293
| 0
|
from binascii import hexlify
from hashlib import shaaaa
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
__UpperCAmelCase : Dict = {
# 1536-bit
5: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 2048-bit
14: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AACAA68FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 3072-bit
15: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 4096-bit
16: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199"
+ "FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 6144-bit
17: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08"
+ "8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B"
+ "302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9"
+ "A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6"
+ "49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8"
+ "FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C"
+ "180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718"
+ "3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D"
+ "04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D"
+ "B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226"
+ "1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC"
+ "E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26"
+ "99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB"
+ "04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2"
+ "233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127"
+ "D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406"
+ "AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918"
+ "DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151"
+ "2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03"
+ "F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F"
+ "BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B"
+ "B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632"
+ "387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E"
+ "6DCC4024FFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
# 8192-bit
18: {
"prime": int(
"FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1"
+ "29024E088A67CC74020BBEA63B139B22514A08798E3404DD"
+ "EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245"
+ "E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
+ "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D"
+ "C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F"
+ "83655D23DCA3AD961C62F356208552BB9ED529077096966D"
+ "670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B"
+ "E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9"
+ "DE2BCBF6955817183995497CEA956AE515D2261898FA0510"
+ "15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64"
+ "ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7"
+ "ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B"
+ "F12FFA06D98A0864D87602733EC86A64521F2B18177B200C"
+ "BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31"
+ "43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7"
+ "88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA"
+ "2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6"
+ "287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED"
+ "1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9"
+ "93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492"
+ "36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD"
+ "F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831"
+ "179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B"
+ "DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF"
+ "5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6"
+ "D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3"
+ "23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA"
+ "CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328"
+ "06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C"
+ "DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE"
+ "12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4"
+ "38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300"
+ "741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568"
+ "3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9"
+ "22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B"
+ "4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A"
+ "062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36"
+ "4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1"
+ "B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92"
+ "4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47"
+ "9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71"
+ "60C980DD98EDD3DFFFFFFFFFFFFFFFFF",
base=16,
),
"generator": 2,
},
}
class __snake_case :
'''simple docstring'''
def __init__( self : Optional[int] , A : int = 14 ):
if group not in primes:
raise ValueError("""Unsupported Group""" )
__snake_case: Any = primes[group]["""prime"""]
__snake_case: List[str] = primes[group]["""generator"""]
__snake_case: str = int(hexlify(urandom(32 ) ) , base=16 )
def UpperCAmelCase__ ( self : List[str] ):
return hex(self.__private_key )[2:]
def UpperCAmelCase__ ( self : str ):
__snake_case: Union[str, Any] = pow(self.generator , self.__private_key , self.prime )
return hex(A )[2:]
    def UpperCAmelCase__ ( self : Any , key : int ):
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )
    def UpperCAmelCase__ ( self : Dict , other_key_str : str ):
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError("""Invalid public key""" )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return shaaaa(str(shared_key ).encode() ).hexdigest()
@staticmethod
    def UpperCAmelCase__ ( remote_public_key_str : int , prime : int ):
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key_str <= prime - 2
            and pow(remote_public_key_str , (prime - 1) // 2 , prime ) == 1
)
    @staticmethod
    def UpperCAmelCase__ ( local_private_key_str : str , remote_public_key_str : str , group : int = 14 ):
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]["""prime"""]
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError("""Invalid public key""" )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return shaaaa(str(shared_key ).encode() ).hexdigest()
if __name__ == "__main__":
import doctest
doctest.testmod()
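# End-to-end sketch of the exchange the class above implements. It reuses the `primes`
# table defined above plus plain `pow`/`sha256`, so it is independent of the obfuscated
# class and method names; the helper name is illustrative.
from hashlib import sha256
from secrets import randbits
def demo_key_exchange(group: int = 14) -> str:
    prime, generator = primes[group]["prime"], primes[group]["generator"]
    alice_private, bob_private = randbits(256), randbits(256)
    alice_public = pow(generator, alice_private, prime)  # sent to Bob in the clear
    bob_public = pow(generator, bob_private, prime)      # sent to Alice in the clear
    shared_alice = pow(bob_public, alice_private, prime)
    shared_bob = pow(alice_public, bob_private, prime)
    assert shared_alice == shared_bob  # both sides derive the same secret
    return sha256(str(shared_alice).encode()).hexdigest()
print(demo_key_exchange())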
| 355
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@parameterized.expand([(None,), ("""foo.json""",)] )
def UpperCAmelCase__ ( self : List[str] , A : Optional[Any] ):
__snake_case: Any = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
__snake_case: Optional[int] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: str = AutoConfig.from_pretrained("""gpt2""" )
__snake_case: Any = GenerationConfig.from_model_config(A )
__snake_case: str = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self : str ):
__snake_case: List[str] = GenerationConfig()
__snake_case: Tuple = {
"""max_new_tokens""": 1_024,
"""foo""": """bar""",
}
__snake_case: List[str] = copy.deepcopy(A )
__snake_case: Optional[int] = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {"""foo""": """bar"""} )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: List[str] = GenerationConfig()
__snake_case: Optional[int] = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(A )
__snake_case: Any = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
__snake_case: int = GenerationConfig.from_model_config(A )
assert not hasattr(A , """foo""" ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Dict = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
__snake_case: Union[str, Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
__snake_case: Tuple = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCAmelCase__ ( cls : List[str] ):
__snake_case: Optional[int] = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Optional[int] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__snake_case: str = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""test-generation-config""" , push_to_hub=A , use_auth_token=self._token )
__snake_case: Optional[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__snake_case: int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=A , use_auth_token=self._token )
__snake_case: Optional[int] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
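# For reference, a minimal sketch (hypothetical paths, not part of the test suite) of
# the save/load round-trip exercised above:
#
#     from transformers import GenerationConfig
#     cfg = GenerationConfig(do_sample=True, temperature=0.7)
#     cfg.save_pretrained("/tmp/gen-cfg")                  # writes generation_config.json
#     reloaded = GenerationConfig.from_pretrained("/tmp/gen-cfg", temperature=1.0)
#     assert reloaded.temperature == 1.0                   # from_pretrained kwargs override saved values
#     assert reloaded.do_sample is True                    # untouched fields are preserved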
| 293
| 0
|
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class __snake_case :
'''simple docstring'''
def __init__( self : List[str] , A : Any , A : Dict=13 , A : Any=7 , A : Any=True , A : List[str]=True , A : Tuple=True , A : int=True , A : int=99 , A : Optional[Any]=64 , A : int=5 , A : Optional[Any]=4 , A : List[Any]=37 , A : Tuple="gelu" , A : Optional[Any]=0.1 , A : Dict=0.1 , A : str=512 , A : Union[str, Any]=16 , A : Tuple=2 , A : Any=0.02 , A : str=3 , A : List[str]=4 , A : str=None , ):
__snake_case: List[str] = parent
__snake_case: Optional[Any] = batch_size
__snake_case: List[Any] = seq_length
__snake_case: List[Any] = is_training
__snake_case: Optional[int] = use_input_mask
__snake_case: Dict = use_token_type_ids
__snake_case: Tuple = use_labels
__snake_case: str = vocab_size
__snake_case: Dict = hidden_size
__snake_case: Optional[Any] = num_hidden_layers
__snake_case: str = num_attention_heads
__snake_case: Optional[int] = intermediate_size
__snake_case: List[Any] = hidden_act
__snake_case: int = hidden_dropout_prob
__snake_case: int = attention_probs_dropout_prob
__snake_case: int = max_position_embeddings
__snake_case: Optional[int] = type_vocab_size
__snake_case: str = type_sequence_label_size
__snake_case: Optional[int] = initializer_range
__snake_case: Optional[Any] = num_labels
__snake_case: Dict = num_choices
__snake_case: Union[str, Any] = scope
__snake_case: int = vocab_size - 1
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case: Optional[int] = None
if self.use_input_mask:
__snake_case: Dict = random_attention_mask([self.batch_size, self.seq_length] )
__snake_case: Any = None
if self.use_labels:
__snake_case: Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case: Tuple = self.get_config()
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self : Optional[Any] ):
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=A , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[Any] = self.prepare_config_and_inputs()
__snake_case: int = True
return config, input_ids, input_mask, token_labels
def UpperCAmelCase__ ( self : Dict , A : Optional[int] , A : Optional[int] , A : Optional[int] ):
__snake_case: Any = GPTNeoXModel(config=A )
model.to(A )
model.eval()
__snake_case: Any = model(A , attention_mask=A )
__snake_case: Optional[Any] = model(A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : int , A : Any , A : str , A : Any ):
__snake_case: Tuple = True
__snake_case: Dict = GPTNeoXModel(A )
model.to(A )
model.eval()
__snake_case: Any = model(A , attention_mask=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Union[str, Any] , A : Optional[int] , A : Optional[int] , A : List[Any] , A : List[Any] ):
__snake_case: List[Any] = GPTNeoXForCausalLM(config=A )
model.to(A )
model.eval()
__snake_case: List[str] = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : int , A : List[Any] , A : Optional[Any] , A : int , A : Any ):
__snake_case: Any = self.num_labels
__snake_case: List[str] = GPTNeoXForQuestionAnswering(A )
model.to(A )
model.eval()
__snake_case: Tuple = model(A , attention_mask=A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[Any] , A : Tuple , A : Optional[int] , A : Optional[int] , A : Union[str, Any] ):
__snake_case: Tuple = self.num_labels
__snake_case: Tuple = GPTNeoXForSequenceClassification(A )
model.to(A )
model.eval()
__snake_case: List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case: Tuple = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : List[Any] , A : str , A : List[str] , A : Any , A : int ):
__snake_case: Any = self.num_labels
__snake_case: List[Any] = GPTNeoXForTokenClassification(A )
model.to(A )
model.eval()
__snake_case: Optional[Any] = model(A , attention_mask=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Union[str, Any] , A : Any , A : Union[str, Any] , A : Any ):
__snake_case: str = True
__snake_case: Dict = GPTNeoXForCausalLM(config=A )
model.to(A )
model.eval()
# first forward pass
__snake_case: List[Any] = model(A , attention_mask=A , use_cache=A )
__snake_case: Any = outputs.past_key_values
# create hypothetical multiple next tokens and extend to next_input_ids
__snake_case: Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
__snake_case: Any = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append to next input_ids and attention mask
__snake_case: Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
__snake_case: str = torch.cat([input_mask, next_mask] , dim=-1 )
__snake_case: str = model(A , attention_mask=A , output_hidden_states=A )
__snake_case: Tuple = output_from_no_past["""hidden_states"""][0]
__snake_case: List[Any] = model(
A , attention_mask=A , past_key_values=A , output_hidden_states=A , )["""hidden_states"""][0]
# select random slice
__snake_case: Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
__snake_case: int = output_from_no_past[:, -3:, random_slice_idx].detach()
__snake_case: str = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A , A , atol=1E-3 ) )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = self.prepare_config_and_inputs()
__snake_case: Dict = config_and_inputs
__snake_case: Dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCAmelCase__ = (
{
"""feature-extraction""": GPTNeoXModel,
"""question-answering""": GPTNeoXForQuestionAnswering,
"""text-classification""": GPTNeoXForSequenceClassification,
"""text-generation""": GPTNeoXForCausalLM,
"""token-classification""": GPTNeoXForTokenClassification,
"""zero-shot""": GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[str] = GPTNeoXModelTester(self )
__snake_case: Tuple = ConfigTester(self , config_class=A , hidden_size=64 , num_attention_heads=8 )
def UpperCAmelCase__ ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(A , A , A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: str = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(A , A , A )
def UpperCAmelCase__ ( self : Optional[Any] ):
# This regression test was failing with PyTorch < 1.3
__snake_case: Tuple = self.model_tester.prepare_config_and_inputs_for_decoder()
__snake_case: List[str] = None
self.model_tester.create_and_check_model_as_decoder(A , A , A )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(A , A , A )
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*A )
def UpperCAmelCase__ ( self : int ):
__snake_case: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
@unittest.skip(reason="""Feed forward chunking is not implemented""" )
def UpperCAmelCase__ ( self : Dict ):
pass
@parameterized.expand([("""linear""",), ("""dynamic""",)] )
def UpperCAmelCase__ ( self : Tuple , A : Optional[Any] ):
__snake_case: Any = self.model_tester.prepare_config_and_inputs_for_common()
__snake_case: Optional[Any] = ids_tensor([1, 10] , config.vocab_size )
__snake_case: str = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case: Any = GPTNeoXModel(A )
original_model.to(A )
original_model.eval()
__snake_case: str = original_model(A ).last_hidden_state
__snake_case: Dict = original_model(A ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
__snake_case: Dict = {"""type""": scaling_type, """factor""": 10.0}
__snake_case: List[str] = GPTNeoXModel(A )
scaled_model.to(A )
scaled_model.eval()
__snake_case: List[str] = scaled_model(A ).last_hidden_state
__snake_case: Union[str, Any] = scaled_model(A ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(A , A , atol=1E-5 ) )
else:
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(A , A , atol=1E-5 ) )
@require_torch
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Optional[Any] = AutoTokenizer.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
for checkpointing in [True, False]:
__snake_case: Tuple = GPTNeoXForCausalLM.from_pretrained("""EleutherAI/pythia-410m-deduped""" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(A )
__snake_case: str = tokenizer("""My favorite food is""" , return_tensors="""pt""" ).to(A )
# The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
__snake_case: List[Any] = """My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"""
__snake_case: Optional[int] = model.generate(**A , do_sample=A , max_new_tokens=20 )
__snake_case: List[str] = tokenizer.batch_decode(A )[0]
self.assertEqual(A , A )
| 356
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__UpperCAmelCase : Tuple = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def A__ ( SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts)
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config)
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights)
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
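# As a hypothetical illustration (flag names mirror the CLI defined below), a CLM
# distillation run that passes these checks would leave `--mlm` unset and use e.g.
# alpha_ce=0.5, alpha_clm=0.5, alpha_mlm=0.0 with student_type=teacher_type="gpt2";
# an MLM run would instead set `--mlm`, alpha_mlm > 0 and alpha_clm = 0.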
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> str:
if args.student_type == "roberta":
__snake_case: Optional[Any] = False
elif args.student_type == "gpt2":
__snake_case: str = False
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[str]:
if args.student_type == "roberta":
__snake_case: Optional[int] = False
def A__ ( ) -> Tuple:
__snake_case: Optional[int] = argparse.ArgumentParser(description="""Training""")
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""")
parser.add_argument(
"""--dump_path""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The output directory (log, checkpoints, parameters, etc.)""")
parser.add_argument(
"""--data_file""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=SCREAMING_SNAKE_CASE__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""Path to the student configuration.""")
parser.add_argument(
"""--student_pretrained_weights""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Load student initialization checkpoint.""")
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""Teacher type (BERT, RoBERTa).""")
parser.add_argument("""--teacher_name""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The teacher model.""")
parser.add_argument("""--temperature""" , default=2.0 , type=SCREAMING_SNAKE_CASE__ , help="""Temperature for the softmax temperature.""")
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the distillation loss. Must be >=0.""")
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the CLM loss. Must be >=0.""")
parser.add_argument("""--alpha_mse""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the MSE loss. Must be >=0.""")
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""")
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""")
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to mask out.""")
parser.add_argument("""--word_keep""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to keep.""")
parser.add_argument("""--word_rand""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to randomly replace.""")
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=SCREAMING_SNAKE_CASE__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=SCREAMING_SNAKE_CASE__ , help="""The token counts in the data_file for MLM.""")
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=SCREAMING_SNAKE_CASE__ , default=3 , help="""Number of pass on the whole dataset.""")
parser.add_argument("""--batch_size""" , type=SCREAMING_SNAKE_CASE__ , default=5 , help="""Batch size (for each process).""")
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=SCREAMING_SNAKE_CASE__ , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=SCREAMING_SNAKE_CASE__ , help="""Linear warmup proportion.""")
parser.add_argument("""--weight_decay""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Weight decay if we apply some.""")
parser.add_argument("""--learning_rate""" , default=5e-4 , type=SCREAMING_SNAKE_CASE__ , help="""The initial learning rate for Adam.""")
parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=SCREAMING_SNAKE_CASE__ , help="""Epsilon for Adam optimizer.""")
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=SCREAMING_SNAKE_CASE__ , help="""Max gradient norm.""")
parser.add_argument("""--initializer_range""" , default=0.02 , type=SCREAMING_SNAKE_CASE__ , help="""Random initialization range.""")
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=SCREAMING_SNAKE_CASE__ , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="""Number of GPUs in the node.""")
parser.add_argument("""--local_rank""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="""Distributed training - Local rank""")
parser.add_argument("""--seed""" , type=SCREAMING_SNAKE_CASE__ , default=56 , help="""Random seed""")
parser.add_argument("""--log_interval""" , type=SCREAMING_SNAKE_CASE__ , default=500 , help="""Tensorboard logging interval.""")
parser.add_argument("""--checkpoint_interval""" , type=SCREAMING_SNAKE_CASE__ , default=4000 , help="""Checkpoint interval.""")
__snake_case: List[Any] = parser.parse_args()
sanity_checks(SCREAMING_SNAKE_CASE__)
# ARGS #
init_gpu_params(SCREAMING_SNAKE_CASE__)
set_seed(SCREAMING_SNAKE_CASE__)
if args.is_master:
if os.path.exists(args.dump_path):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
""" it. Use `--force` if you want to overwrite it.""")
else:
shutil.rmtree(args.dump_path)
if not os.path.exists(args.dump_path):
os.makedirs(args.dump_path)
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''')
# SAVE PARAMS #
logger.info(F'''Param: {args}''')
with open(os.path.join(args.dump_path , """parameters.json""") , """w""") as f:
json.dump(vars(SCREAMING_SNAKE_CASE__) , SCREAMING_SNAKE_CASE__ , indent=4)
git_log(args.dump_path)
__snake_case , __snake_case , __snake_case: str = MODEL_CLASSES[args.student_type]
__snake_case , __snake_case , __snake_case: Union[str, Any] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__snake_case: Tuple = teacher_tokenizer_class.from_pretrained(args.teacher_name)
__snake_case: str = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__snake_case: List[str] = tokenizer.all_special_tokens.index(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''')
__snake_case: Optional[Any] = special_tok_ids
__snake_case: List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file , """rb""") as fp:
__snake_case: int = pickle.load(SCREAMING_SNAKE_CASE__)
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''')
with open(args.token_counts , """rb""") as fp:
__snake_case: List[str] = pickle.load(SCREAMING_SNAKE_CASE__)
__snake_case: Dict = np.maximum(SCREAMING_SNAKE_CASE__ , 1) ** -args.mlm_smoothing
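# Because the exponent is negative, a token seen once keeps weight 1.0 while frequent
# tokens shrink towards 0, i.e. rarer tokens are favoured for masking (XLM-style smoothing).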
for idx in special_tok_ids.values():
__snake_case: Union[str, Any] = 0.0 # do not predict special tokens
__snake_case: Any = torch.from_numpy(SCREAMING_SNAKE_CASE__)
else:
__snake_case: Any = None
__snake_case: Union[str, Any] = LmSeqsDataset(params=SCREAMING_SNAKE_CASE__ , data=SCREAMING_SNAKE_CASE__)
logger.info("""Data loader created.""")
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''')
__snake_case: Tuple = student_config_class.from_pretrained(args.student_config)
__snake_case: List[str] = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''')
__snake_case: Optional[int] = student_model_class.from_pretrained(args.student_pretrained_weights , config=SCREAMING_SNAKE_CASE__)
else:
__snake_case: Union[str, Any] = student_model_class(SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''')
logger.info("""Student loaded.""")
# TEACHER #
__snake_case: Optional[int] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''')
logger.info(F'''Teacher loaded from {args.teacher_name}.''')
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if args.freeze_token_type_embds:
freeze_token_type_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__snake_case: List[str] = Distiller(
params=SCREAMING_SNAKE_CASE__ , dataset=SCREAMING_SNAKE_CASE__ , token_probs=SCREAMING_SNAKE_CASE__ , student=SCREAMING_SNAKE_CASE__ , teacher=SCREAMING_SNAKE_CASE__)
distiller.train()
logger.info("""Let's go get some drinks.""")
if __name__ == "__main__":
main()
| 293
| 0
|
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectrona,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
class __snake_case :
'''simple docstring'''
@staticmethod
def UpperCAmelCase__ ( *A : str , **A : List[Any] ):
pass
def A__ ( SCREAMING_SNAKE_CASE__) -> Optional[Any]:
return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
__UpperCAmelCase : Tuple = (
"https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"
)
@is_pipeline_test
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
def UpperCAmelCase__ ( self : Optional[int] , A : Union[str, Any] , A : Tuple , A : str ):
__snake_case: str = pipeline(
"""document-question-answering""" , model=A , tokenizer=A , image_processor=A )
__snake_case: Dict = INVOICE_URL
__snake_case: Optional[Any] = list(zip(*apply_tesseract(load_image(A ) , A , """""" ) ) )
__snake_case: Optional[int] = """What is the placebo?"""
__snake_case: Optional[Any] = [
{
"""image""": load_image(A ),
"""question""": question,
},
{
"""image""": image,
"""question""": question,
},
{
"""image""": image,
"""question""": question,
"""word_boxes""": word_boxes,
},
]
return dqa_pipeline, examples
def UpperCAmelCase__ ( self : int , A : Optional[Any] , A : Tuple ):
__snake_case: Tuple = dqa_pipeline(A , top_k=2 )
self.assertEqual(
A , [
[
{"""score""": ANY(A ), """answer""": ANY(A ), """start""": ANY(A ), """end""": ANY(A )},
{"""score""": ANY(A ), """answer""": ANY(A ), """start""": ANY(A ), """end""": ANY(A )},
]
]
* 3 , )
@require_torch
@require_detectrona
@require_pytesseract
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Optional[Any] = pipeline("""document-question-answering""" , model="""hf-internal-testing/tiny-random-layoutlmv2""" )
__snake_case: Optional[int] = INVOICE_URL
__snake_case: List[str] = """How many cats are there?"""
__snake_case: Any = [
{"""score""": 0.0001, """answer""": """oy 2312/2019""", """start""": 38, """end""": 39},
{"""score""": 0.0001, """answer""": """oy 2312/2019 DUE""", """start""": 38, """end""": 40},
]
__snake_case: Dict = dqa_pipeline(image=A , question=A , top_k=2 )
self.assertEqual(nested_simplify(A , decimals=4 ) , A )
__snake_case: Tuple = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(nested_simplify(A , decimals=4 ) , A )
# This image does not contain any detectable text, so layoutlmv2 should fail
# and return an empty answer.
__snake_case: List[Any] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
__snake_case: Union[str, Any] = dqa_pipeline(image=A , question=A , top_k=2 )
self.assertEqual(A , [] )
# We can optionally pass the words and bounding boxes directly
__snake_case: Optional[int] = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
__snake_case: int = []
__snake_case: int = []
__snake_case: str = dqa_pipeline(image=A , question=A , words=A , boxes=A , top_k=2 )
self.assertEqual(A , [] )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: int = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , )
__snake_case: Tuple = INVOICE_URL
__snake_case: List[str] = """What is the invoice number?"""
__snake_case: int = dqa_pipeline(image=A , question=A , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.9944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__snake_case: Optional[Any] = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.9944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0009, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__snake_case: Any = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
[
{"""score""": 0.9944, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0009, """answer""": """us-001""", """start""": 16, """end""": 16},
],
]
* 2 , )
@slow
@require_torch
@require_detectrona
@require_pytesseract
def UpperCAmelCase__ ( self : str ):
__snake_case: Dict = pipeline(
"""document-question-answering""" , model="""tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa""" , revision="""9977165""" , max_seq_len=50 , )
__snake_case: int = INVOICE_URL
__snake_case: int = """What is the invoice number?"""
__snake_case: Dict = dqa_pipeline(image=A , question=A , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.9974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__snake_case: Any = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.9974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9948, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__snake_case: Any = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
[
{"""score""": 0.9974, """answer""": """1110212019""", """start""": 23, """end""": 23},
{"""score""": 0.9948, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCAmelCase__ ( self : Any ):
__snake_case: Tuple = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=A )
__snake_case: Optional[Any] = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=A , revision="""3dc6de3""" , )
__snake_case: List[str] = INVOICE_URL
__snake_case: Dict = """What is the invoice number?"""
__snake_case: Tuple = dqa_pipeline(image=A , question=A , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.4251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
__snake_case: Tuple = dqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.4251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
__snake_case: Any = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
[
{"""score""": 0.4251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 23, """end""": 23},
]
]
* 2 , )
__snake_case: int = list(zip(*apply_tesseract(load_image(A ) , A , """""" ) ) )
# This model should also work if `image` is set to None
__snake_case: Optional[Any] = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.4251, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.0819, """answer""": """1110212019""", """start""": 23, """end""": 23},
] , )
@slow
@require_torch
@require_pytesseract
@require_vision
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Union[str, Any] = AutoTokenizer.from_pretrained(
"""impira/layoutlm-document-qa""" , revision="""3dc6de3""" , add_prefix_space=A )
__snake_case: Dict = pipeline(
"""document-question-answering""" , model="""impira/layoutlm-document-qa""" , tokenizer=A , revision="""3dc6de3""" , max_seq_len=50 , )
__snake_case: Tuple = INVOICE_URL
__snake_case: Optional[Any] = """What is the invoice number?"""
__snake_case: Optional[int] = dqa_pipeline(image=A , question=A , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.9999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
__snake_case: str = dqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
[
{"""score""": 0.9999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9998, """answer""": """us-001""", """start""": 16, """end""": 16},
]
]
* 2 , )
__snake_case: Optional[Any] = list(zip(*apply_tesseract(load_image(A ) , A , """""" ) ) )
# This model should also work if `image` is set to None
__snake_case: Optional[int] = dqa_pipeline({"""image""": None, """word_boxes""": word_boxes, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(A , decimals=4 ) , [
{"""score""": 0.9999, """answer""": """us-001""", """start""": 16, """end""": 16},
{"""score""": 0.9998, """answer""": """us-001""", """start""": 16, """end""": 16},
] , )
@slow
@require_torch
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = pipeline(
"""document-question-answering""" , model="""naver-clova-ix/donut-base-finetuned-docvqa""" , tokenizer=AutoTokenizer.from_pretrained("""naver-clova-ix/donut-base-finetuned-docvqa""" ) , feature_extractor="""naver-clova-ix/donut-base-finetuned-docvqa""" , )
__snake_case: str = INVOICE_URL
__snake_case: Optional[Any] = """What is the invoice number?"""
__snake_case: Optional[int] = dqa_pipeline(image=A , question=A , top_k=2 )
self.assertEqual(nested_simplify(A , decimals=4 ) , [{"""answer""": """us-001"""}] )
@require_tf
@unittest.skip("""Document question answering not implemented in TF""" )
def UpperCAmelCase__ ( self : str ):
pass
| 357
|
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
| 293
| 0
|
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class __snake_case ( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[Any] = FlaxXLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
__snake_case: Optional[Any] = AutoTokenizer.from_pretrained("""xlm-roberta-base""" )
__snake_case: int = """The dog is cute and lives in the garden house"""
__snake_case: Any = jnp.array([tokenizer.encode(A )] )
__snake_case: Dict = (1, 12, 768) # batch_size, sequence_length, embedding_vector_dim
__snake_case: int = jnp.array(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
__snake_case: int = model(A )["""last_hidden_state"""]
self.assertEqual(output.shape , A )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , A , atol=1E-3 ) )
| 358
|
import argparse
from collections import defaultdict
import yaml
__UpperCAmelCase : int = "docs/source/en/_toctree.yml"
def A__ ( SCREAMING_SNAKE_CASE__) -> Dict:
__snake_case: Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE__)
for doc in model_doc:
counts[doc["local"]] += 1
__snake_case: Dict = [key for key, value in counts.items() if value > 1]
__snake_case: Optional[Any] = []
for duplicate_key in duplicates:
__snake_case: Tuple = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key})
if len(SCREAMING_SNAKE_CASE__) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of contents at '''
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""")
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]})
# Add non-duplicated keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1])
# Sort
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__: s["title"].lower())
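# A worked example of the dedup-and-sort above, with hypothetical entries:
#     toc = [{"local": "model_doc/bert", "title": "BERT"},
#            {"local": "model_doc/albert", "title": "ALBERT"},
#            {"local": "model_doc/bert", "title": "BERT"}]  # duplicate key, same title
# would return [{"local": "model_doc/albert", "title": "ALBERT"},
#               {"local": "model_doc/bert", "title": "BERT"}];
# two entries sharing a "local" but with *different* titles raise a ValueError instead.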
def A__ ( SCREAMING_SNAKE_CASE__=False) -> List[str]:
with open(SCREAMING_SNAKE_CASE__ , encoding="""utf-8""") as f:
__snake_case: Optional[int] = yaml.safe_load(f.read())
# Get to the API doc
__snake_case: Dict = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__snake_case: str = content[api_idx]["""sections"""]
# Then to the model doc
__snake_case: List[Any] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
__snake_case: Dict = api_doc[model_idx]["""sections"""]
__snake_case: int = [(idx, section) for idx, section in enumerate(SCREAMING_SNAKE_CASE__) if """sections""" in section]
__snake_case: Optional[int] = False
for idx, modality_doc in modalities_docs:
__snake_case: Dict = modality_doc["""sections"""]
__snake_case: List[str] = clean_model_doc_toc(SCREAMING_SNAKE_CASE__)
if old_modality_doc != new_modality_doc:
__snake_case: List[str] = True
if overwrite:
__snake_case: Dict = new_modality_doc
if diff:
if overwrite:
__snake_case: Dict = model_doc
__snake_case: int = api_doc
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""") as f:
f.write(yaml.dump(SCREAMING_SNAKE_CASE__ , allow_unicode=SCREAMING_SNAKE_CASE__))
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""")
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__UpperCAmelCase : str = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
| 293
| 0
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__UpperCAmelCase : Any = random.Random()
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None) -> Any:
if rng is None:
__snake_case: Dict = global_rng
__snake_case: str = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
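# Note: the helper above returns a nested list of shape `shape` filled with floats
# drawn uniformly from [0, scale), using the module-level RNG unless one is passed in.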
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , A : List[str] , A : List[Any]=7 , A : Optional[int]=400 , A : List[Any]=2_000 , A : Dict=2_048 , A : Tuple=128 , A : List[Any]=1 , A : Tuple=512 , A : str=30 , A : Optional[Any]=44_100 , ):
__snake_case: Dict = parent
__snake_case: Optional[Any] = batch_size
__snake_case: Optional[int] = min_seq_length
__snake_case: Optional[Any] = max_seq_length
__snake_case: List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__snake_case: Any = spectrogram_length
__snake_case: Any = feature_size
__snake_case: Union[str, Any] = num_audio_channels
__snake_case: Any = hop_length
__snake_case: List[str] = chunk_length
__snake_case: Any = sampling_rate
def UpperCAmelCase__ ( self : List[Any] ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCAmelCase__ ( self : List[str] , A : str=False , A : int=False ):
def _flatten(A : Dict ):
return list(itertools.chain(*A ) )
if equal_length:
__snake_case: List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__snake_case: int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__snake_case: Tuple = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = TvltFeatureExtractor
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: str = TvltFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : int ):
__snake_case: Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A , """spectrogram_length""" ) )
self.assertTrue(hasattr(A , """feature_size""" ) )
self.assertTrue(hasattr(A , """num_audio_channels""" ) )
self.assertTrue(hasattr(A , """hop_length""" ) )
self.assertTrue(hasattr(A , """chunk_length""" ) )
self.assertTrue(hasattr(A , """sampling_rate""" ) )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: Tuple = feat_extract_first.save_pretrained(A )[0]
check_json_file_has_correct_format(A )
__snake_case: int = self.feature_extraction_class.from_pretrained(A )
__snake_case: List[str] = feat_extract_first.to_dict()
__snake_case: str = feat_extract_second.to_dict()
__snake_case: List[Any] = dict_first.pop("""mel_filters""" )
__snake_case: str = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: str = os.path.join(A , """feat_extract.json""" )
feat_extract_first.to_json_file(A )
__snake_case: List[Any] = self.feature_extraction_class.from_json_file(A )
__snake_case: Dict = feat_extract_first.to_dict()
__snake_case: Any = feat_extract_second.to_dict()
__snake_case: int = dict_first.pop("""mel_filters""" )
__snake_case: int = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Any ):
# Initialize feature_extractor
__snake_case: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__snake_case: Dict = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__snake_case: str = [np.asarray(A ) for speech_input in speech_inputs]
# Test not batched input
__snake_case: int = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__snake_case: Optional[int] = feature_extractor(A , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__snake_case: Union[str, Any] = feature_extractor(
A , return_tensors="""np""" , sampling_rate=44_100 , mask_audio=A ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__snake_case: Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__snake_case: Union[str, Any] = np.asarray(A )
__snake_case: List[Any] = feature_extractor(A , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCAmelCase__ ( self : Union[str, Any] , A : List[str] ):
__snake_case: Tuple = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__snake_case: List[Any] = ds.sort("""id""" ).select(range(A ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = self._load_datasamples(1 )
__snake_case: Optional[int] = TvltFeatureExtractor()
__snake_case: Optional[Any] = feature_extractor(A , return_tensors="""pt""" ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
__snake_case: str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , A , atol=1E-4 ) )
| 359
|
from __future__ import annotations
from decimal import Decimal
from numpy import array
def A__ ( SCREAMING_SNAKE_CASE__) -> list[list[float]]:
__snake_case: Any = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(SCREAMING_SNAKE_CASE__) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
# Calculate the determinant of the matrix
__snake_case: Tuple = float(
d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
if determinant == 0:
raise ValueError("""This matrix has no inverse.""")
# Creates a copy of the matrix with swapped positions of the elements
__snake_case: Optional[int] = [[0.0, 0.0], [0.0, 0.0]]
__snake_case , __snake_case: Optional[Any] = matrix[1][1], matrix[0][0]
__snake_case , __snake_case: Union[str, Any] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(SCREAMING_SNAKE_CASE__)) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(SCREAMING_SNAKE_CASE__) == 3
and len(matrix[0]) == 3
and len(matrix[1]) == 3
and len(matrix[2]) == 3
):
# Calculate the determinant of the matrix using Sarrus' rule
__snake_case: Any = float(
(
(d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
+ (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
+ (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
)
- (
(d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
+ (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
+ (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
))
if determinant == 0:
raise ValueError("""This matrix has no inverse.""")
# Creating cofactor matrix
__snake_case: Tuple = [
[d(0.0), d(0.0), d(0.0)],
[d(0.0), d(0.0), d(0.0)],
[d(0.0), d(0.0), d(0.0)],
]
__snake_case: Dict = (d(matrix[1][1]) * d(matrix[2][2])) - (
d(matrix[1][2]) * d(matrix[2][1])
)
__snake_case: Tuple = -(
(d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
)
__snake_case: Optional[int] = (d(matrix[1][0]) * d(matrix[2][1])) - (
d(matrix[1][1]) * d(matrix[2][0])
)
__snake_case: Union[str, Any] = -(
(d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
)
__snake_case: str = (d(matrix[0][0]) * d(matrix[2][2])) - (
d(matrix[0][2]) * d(matrix[2][0])
)
__snake_case: List[Any] = -(
(d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
)
__snake_case: Optional[Any] = (d(matrix[0][1]) * d(matrix[1][2])) - (
d(matrix[0][2]) * d(matrix[1][1])
)
__snake_case: List[str] = -(
(d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
)
__snake_case: Optional[int] = (d(matrix[0][0]) * d(matrix[1][1])) - (
d(matrix[0][1]) * d(matrix[1][0])
)
# Transpose the cofactor matrix (Adjoint matrix)
__snake_case: List[Any] = array(SCREAMING_SNAKE_CASE__)
for i in range(3):
for j in range(3):
__snake_case: Tuple = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
__snake_case: List[Any] = array(SCREAMING_SNAKE_CASE__)
for i in range(3):
for j in range(3):
inverse_matrix[i][j] /= d(SCREAMING_SNAKE_CASE__)
# Calculate the inverse of the matrix
return [[float(d(SCREAMING_SNAKE_CASE__)) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""")
| 293
| 0
|
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Sequence, Value
from .base import TaskTemplate
@dataclass(frozen=__lowerCamelCase )
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = field(default="""question-answering-extractive""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
lowerCAmelCase__ = Features({"""question""": Value("""string""" ), """context""": Value("""string""" )} )
lowerCAmelCase__ = Features(
{
"""answers""": Sequence(
{
"""text""": Value("""string""" ),
"""answer_start""": Value("""int32""" ),
} )
} )
lowerCAmelCase__ = """question"""
lowerCAmelCase__ = """context"""
lowerCAmelCase__ = """answers"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return {self.question_column: "question", self.context_column: "context", self.answers_column: "answers"}
| 360
|
import math
def A__ ( SCREAMING_SNAKE_CASE__) -> int:
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
__snake_case: Optional[int] = F'''Input value of [number={number}] must be an integer'''
raise TypeError(SCREAMING_SNAKE_CASE__)
if number < 1:
__snake_case: Optional[int] = F'''Input value of [number={number}] must be > 0'''
raise ValueError(SCREAMING_SNAKE_CASE__)
elif number == 1:
return 3
elif number == 2:
return 5
else:
__snake_case: List[Any] = int(math.log(number // 3 , 2)) + 2
__snake_case: str = [3, 5]
__snake_case: int = 2
__snake_case: List[str] = 3
for block in range(1 , SCREAMING_SNAKE_CASE__):
for _ in range(SCREAMING_SNAKE_CASE__):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
proth_index += 1
increment *= 2
return proth_list[number - 1]
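# For reference, the first Proth numbers (k * 2^n + 1 with odd k < 2^n) are
# 3, 5, 9, 13, 17, 25, 33, 41, 49, 57, ... so e.g. the 6th value returned is 25.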
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
__UpperCAmelCase : Optional[int] = 0
try:
__UpperCAmelCase : int = proth(number)
except ValueError:
print(f'ValueError: there is no {number}th Proth number')
continue
print(f'The {number}th Proth number: {value}')
| 293
| 0
|
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tensorflow_text_available, is_torch_available
__UpperCAmelCase : str = {
"configuration_ernie": ["ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP", "ErnieConfig", "ErnieOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[int] = [
"ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST",
"ErnieForCausalLM",
"ErnieForMaskedLM",
"ErnieForMultipleChoice",
"ErnieForNextSentencePrediction",
"ErnieForPreTraining",
"ErnieForQuestionAnswering",
"ErnieForSequenceClassification",
"ErnieForTokenClassification",
"ErnieModel",
"ErniePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_ernie import ERNIE_PRETRAINED_CONFIG_ARCHIVE_MAP, ErnieConfig, ErnieOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ernie import (
ERNIE_PRETRAINED_MODEL_ARCHIVE_LIST,
ErnieForCausalLM,
ErnieForMaskedLM,
ErnieForMultipleChoice,
ErnieForNextSentencePrediction,
ErnieForPreTraining,
ErnieForQuestionAnswering,
ErnieForSequenceClassification,
ErnieForTokenClassification,
ErnieModel,
ErniePreTrainedModel,
)
else:
import sys
__UpperCAmelCase : Optional[int] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 361
|
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = 42
class __snake_case ( __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = True
@register_to_config
def __init__( self : Union[str, Any] , A : int = 3 , A : int = 3 , A : Tuple[str] = ("DownEncoderBlock2D",) , A : Tuple[str] = ("UpDecoderBlock2D",) , A : Tuple[int] = (64,) , A : int = 1 , A : str = "silu" , A : int = 4 , A : int = 32 , A : int = 32 , A : float = 0.1_8215 , ):
super().__init__()
# pass init params to Encoder
__snake_case: Any = Encoder(
in_channels=A , out_channels=A , down_block_types=A , block_out_channels=A , layers_per_block=A , act_fn=A , norm_num_groups=A , double_z=A , )
# pass init params to Decoder
__snake_case: int = Decoder(
in_channels=A , out_channels=A , up_block_types=A , block_out_channels=A , layers_per_block=A , norm_num_groups=A , act_fn=A , )
__snake_case: Dict = nn.Convad(2 * latent_channels , 2 * latent_channels , 1 )
__snake_case: int = nn.Convad(A , A , 1 )
__snake_case: List[str] = False
__snake_case: Optional[int] = False
# only relevant if vae tiling is enabled
__snake_case: Any = self.config.sample_size
__snake_case: int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__snake_case: Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__snake_case: Optional[int] = 0.25
def UpperCAmelCase__ ( self : int , A : List[str] , A : Optional[Any]=False ):
if isinstance(A , (Encoder, Decoder) ):
__snake_case: str = value
def UpperCAmelCase__ ( self : str , A : bool = True ):
__snake_case: Union[str, Any] = use_tiling
def UpperCAmelCase__ ( self : Optional[int] ):
self.enable_tiling(A )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[str] = True
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[str] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Any = {}
def fn_recursive_add_processors(A : str , A : torch.nn.Module , A : Dict[str, AttentionProcessor] ):
if hasattr(A , """set_processor""" ):
__snake_case: List[Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , A , A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(A , A , A )
return processors
def UpperCAmelCase__ ( self : Optional[int] , A : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
__snake_case: Any = len(self.attn_processors.keys() )
if isinstance(A , A ) and len(A ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(A )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(A : str , A : torch.nn.Module , A : Optional[Any] ):
if hasattr(A , """set_processor""" ):
if not isinstance(A , A ):
module.set_processor(A )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , A , A )
for name, module in self.named_children():
fn_recursive_attn_processor(A , A , A )
def UpperCAmelCase__ ( self : List[str] ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def UpperCAmelCase__ ( self : Optional[Any] , A : torch.FloatTensor , A : bool = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(A , return_dict=A )
if self.use_slicing and x.shape[0] > 1:
            __snake_case: List[Any] = [self.encoder(x_slice ) for x_slice in x.split(1 )]
__snake_case: Optional[Any] = torch.cat(A )
else:
__snake_case: str = self.encoder(A )
__snake_case: Any = self.quant_conv(A )
__snake_case: Tuple = DiagonalGaussianDistribution(A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=A )
def UpperCAmelCase__ ( self : Tuple , A : torch.FloatTensor , A : bool = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(A , return_dict=A )
__snake_case: Optional[int] = self.post_quant_conv(A )
__snake_case: Union[str, Any] = self.decoder(A )
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
@apply_forward_hook
def UpperCAmelCase__ ( self : Tuple , A : torch.FloatTensor , A : bool = True ):
if self.use_slicing and z.shape[0] > 1:
            __snake_case: Union[str, Any] = [self._decode(z_slice ).sample for z_slice in z.split(1 )]
__snake_case: List[str] = torch.cat(A )
else:
__snake_case: int = self._decode(A ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=A )
def UpperCAmelCase__ ( self : Any , A : Tuple , A : int , A : List[Any] ):
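        # Linearly cross-fade the bottom `blend_extent` rows of tile `a` into the
        # top rows of tile `b`, so vertically adjacent tiles join without a visible seam.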
__snake_case: int = min(a.shape[2] , b.shape[2] , A )
for y in range(A ):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def UpperCAmelCase__ ( self : Union[str, Any] , A : Optional[Any] , A : List[str] , A : List[str] ):
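        # Linearly cross-fade the rightmost `blend_extent` columns of tile `a` into
        # the leftmost columns of tile `b` (the horizontal counterpart of blend_v).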
__snake_case: Dict = min(a.shape[3] , b.shape[3] , A )
for x in range(A ):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def UpperCAmelCase__ ( self : int , A : torch.FloatTensor , A : bool = True ):
__snake_case: List[str] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__snake_case: Dict = int(self.tile_latent_min_size * self.tile_overlap_factor )
__snake_case: Dict = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
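        # The tiles overlap by `tile_overlap_factor` and are blended below to hide seams.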
__snake_case: Optional[int] = []
for i in range(0 , x.shape[2] , A ):
__snake_case: Optional[int] = []
for j in range(0 , x.shape[3] , A ):
__snake_case: int = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__snake_case: Tuple = self.encoder(A )
__snake_case: Dict = self.quant_conv(A )
row.append(A )
rows.append(A )
__snake_case: Tuple = []
for i, row in enumerate(A ):
__snake_case: str = []
for j, tile in enumerate(A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__snake_case: Optional[Any] = self.blend_v(rows[i - 1][j] , A , A )
if j > 0:
__snake_case: Tuple = self.blend_h(row[j - 1] , A , A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(A , dim=3 ) )
__snake_case: Tuple = torch.cat(A , dim=2 )
__snake_case: Optional[int] = DiagonalGaussianDistribution(A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=A )
def UpperCAmelCase__ ( self : Union[str, Any] , A : torch.FloatTensor , A : bool = True ):
__snake_case: Optional[Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__snake_case: str = int(self.tile_sample_min_size * self.tile_overlap_factor )
__snake_case: int = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__snake_case: List[Any] = []
for i in range(0 , z.shape[2] , A ):
__snake_case: Optional[Any] = []
for j in range(0 , z.shape[3] , A ):
__snake_case: Dict = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__snake_case: Any = self.post_quant_conv(A )
__snake_case: Optional[Any] = self.decoder(A )
row.append(A )
rows.append(A )
__snake_case: Optional[Any] = []
for i, row in enumerate(A ):
__snake_case: Optional[Any] = []
for j, tile in enumerate(A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__snake_case: Tuple = self.blend_v(rows[i - 1][j] , A , A )
if j > 0:
__snake_case: List[str] = self.blend_h(row[j - 1] , A , A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(A , dim=3 ) )
__snake_case: Dict = torch.cat(A , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
def UpperCAmelCase__ ( self : List[Any] , A : torch.FloatTensor , A : bool = False , A : bool = True , A : Optional[torch.Generator] = None , ):
__snake_case: Optional[Any] = sample
__snake_case: Union[str, Any] = self.encode(A ).latent_dist
if sample_posterior:
__snake_case: Optional[Any] = posterior.sample(generator=A )
else:
__snake_case: Dict = posterior.mode()
__snake_case: Any = self.decode(A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
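# A minimal, self-contained sketch (not part of the original class) of the
# cross-fade performed by blend_v/blend_h above: two overlapping tiles are
# mixed with linearly ramping weights so the seam between them disappears.
if __name__ == "__main__":
    a = torch.zeros(1, 1, 4, 4)
    b = torch.ones(1, 1, 4, 4)
    blend_extent = 4
    for y in range(blend_extent):
        # the weight of tile `b` ramps from 0 to 1 across the overlap rows
        b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
    print(b[0, 0, :, 0])  # tensor([0.0000, 0.2500, 0.5000, 0.7500]) -- a smooth ramp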
| 293
| 0
|
def A__ ( num_a , num_b ) -> bool:
    return num_a ^ num_b < 0
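# A brief usage note (assuming the two-argument signature restored above): the
# xor of two ints is negative exactly when their sign bits differ, so
# A__(1, -1) is True while A__(3, 5) and A__(-3, -5) are both False.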
if __name__ == "__main__":
import doctest
doctest.testmod()
| 362
|
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """sew-d"""
def __init__( self : Dict , A : Any=32 , A : Dict=768 , A : Optional[Any]=12 , A : Union[str, Any]=12 , A : Union[str, Any]=3_072 , A : Optional[Any]=2 , A : Union[str, Any]=512 , A : List[Any]=256 , A : Dict=True , A : Union[str, Any]=True , A : Optional[int]=("p2c", "c2p") , A : str="layer_norm" , A : Dict="gelu_python" , A : Tuple=0.1 , A : Any=0.1 , A : Tuple=0.1 , A : Optional[int]=0.0 , A : Any=0.1 , A : Any=0.02 , A : Dict=1E-7 , A : str=1E-5 , A : int="group" , A : int="gelu" , A : str=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , A : Union[str, Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A : List[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A : Optional[int]=False , A : int=128 , A : int=16 , A : Optional[Any]=True , A : List[Any]=0.05 , A : Any=10 , A : Dict=2 , A : List[Any]=0.0 , A : Union[str, Any]=10 , A : int=0 , A : List[Any]="mean" , A : Union[str, Any]=False , A : Any=False , A : Optional[int]=256 , A : List[Any]=0 , A : Any=1 , A : List[Any]=2 , **A : List[Any] , ):
super().__init__(**A , pad_token_id=A , bos_token_id=A , eos_token_id=A )
__snake_case: Optional[int] = hidden_size
__snake_case: str = feat_extract_norm
__snake_case: int = feat_extract_activation
__snake_case: str = list(A )
__snake_case: Any = list(A )
__snake_case: str = list(A )
__snake_case: Union[str, Any] = conv_bias
__snake_case: int = num_conv_pos_embeddings
__snake_case: str = num_conv_pos_embedding_groups
__snake_case: List[Any] = len(self.conv_dim )
__snake_case: List[str] = num_hidden_layers
__snake_case: Union[str, Any] = intermediate_size
__snake_case: Dict = squeeze_factor
__snake_case: List[Any] = max_position_embeddings
__snake_case: List[Any] = position_buckets
__snake_case: List[str] = share_att_key
__snake_case: int = relative_attention
__snake_case: Union[str, Any] = norm_rel_ebd
__snake_case: List[str] = list(A )
__snake_case: Tuple = hidden_act
__snake_case: List[Any] = num_attention_heads
__snake_case: str = hidden_dropout
__snake_case: int = attention_dropout
__snake_case: Dict = activation_dropout
__snake_case: Any = feat_proj_dropout
__snake_case: int = final_dropout
__snake_case: List[Any] = layer_norm_eps
__snake_case: List[str] = feature_layer_norm_eps
__snake_case: List[Any] = initializer_range
__snake_case: List[Any] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case: List[Any] = apply_spec_augment
__snake_case: List[Any] = mask_time_prob
__snake_case: str = mask_time_length
__snake_case: List[str] = mask_time_min_masks
__snake_case: str = mask_feature_prob
__snake_case: Optional[int] = mask_feature_length
__snake_case: Dict = mask_feature_min_masks
# ctc loss
__snake_case: Any = ctc_loss_reduction
__snake_case: str = ctc_zero_infinity
# sequence classification
__snake_case: Optional[Any] = use_weighted_layer_sum
__snake_case: List[Any] = classifier_proj_size
@property
def UpperCAmelCase__ ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
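# A small illustrative check (not part of the original file): with the default
# conv_stride of (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the property above
# reduces to 5 * 2**6 == 320, i.e. one encoder frame per 320 waveform samples
# (20 ms at a typical 16 kHz sampling rate).
if __name__ == "__main__":
    assert functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 320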
| 293
| 0
|
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory
from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
convert,
ensure_valid_input,
generate_identified_filename,
infer_shapes,
quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow
class __snake_case :
'''simple docstring'''
def UpperCAmelCase__ ( self : int , A : Optional[int] , A : Optional[int] , A : List[Any] ):
return None
class __snake_case :
'''simple docstring'''
def UpperCAmelCase__ ( self : Optional[Any] , A : int , A : Dict , A : Tuple , A : Any ):
return None
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = [
# (model_name, model_kwargs)
("""bert-base-cased""", {}),
("""gpt2""", {"""use_cache""": False}), # We don't support exporting GPT2 past keys anymore
]
@require_tf
@slow
def UpperCAmelCase__ ( self : Tuple ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(A , """tf""" , 12 , **A )
@require_torch
@slow
def UpperCAmelCase__ ( self : Tuple ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
self._test_export(A , """pt""" , 12 , **A )
@require_torch
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
from transformers import BertModel
__snake_case: Union[str, Any] = ["""[UNK]""", """[SEP]""", """[CLS]""", """[PAD]""", """[MASK]""", """some""", """other""", """words"""]
with NamedTemporaryFile(mode="""w+t""" ) as vocab_file:
vocab_file.write("""\n""".join(A ) )
vocab_file.flush()
__snake_case: Optional[Any] = BertTokenizerFast(vocab_file.name )
with TemporaryDirectory() as bert_save_dir:
__snake_case: List[str] = BertModel(BertConfig(vocab_size=len(A ) ) )
model.save_pretrained(A )
self._test_export(A , """pt""" , 12 , A )
@require_tf
@slow
def UpperCAmelCase__ ( self : Dict ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__snake_case: Union[str, Any] = self._test_export(A , """tf""" , 12 , **A )
__snake_case: Any = quantize(Path(A ) )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(A ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
@require_torch
@slow
def UpperCAmelCase__ ( self : List[Any] ):
for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
__snake_case: Any = self._test_export(A , """pt""" , 12 , **A )
__snake_case: Optional[int] = quantize(A )
# Ensure the actual quantized model is not bigger than the original one
if quantized_path.stat().st_size >= Path(A ).stat().st_size:
self.fail("""Quantized model is bigger than initial ONNX model""" )
def UpperCAmelCase__ ( self : List[Any] , A : List[Any] , A : Union[str, Any] , A : Any , A : Tuple=None , **A : List[str] ):
try:
# Compute path
with TemporaryDirectory() as tempdir:
__snake_case: Union[str, Any] = Path(A ).joinpath("""model.onnx""" )
# Remove folder if exists
if path.parent.exists():
path.parent.rmdir()
# Export
convert(A , A , A , A , A , **A )
return path
except Exception as e:
self.fail(A )
@require_torch
@require_tokenizers
@slow
def UpperCAmelCase__ ( self : str ):
from transformers import BertModel
__snake_case: int = BertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
__snake_case: Optional[int] = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(A , A , """pt""" )
@require_tf
@require_tokenizers
@slow
def UpperCAmelCase__ ( self : Tuple ):
from transformers import TFBertModel
__snake_case: List[Any] = TFBertModel(BertConfig.from_pretrained("""lysandre/tiny-bert-random""" ) )
__snake_case: Dict = BertTokenizerFast.from_pretrained("""lysandre/tiny-bert-random""" )
self._test_infer_dynamic_axis(A , A , """tf""" )
def UpperCAmelCase__ ( self : Any , A : List[str] , A : int , A : Tuple ):
__snake_case: List[str] = FeatureExtractionPipeline(A , A )
__snake_case: str = ["""input_ids""", """token_type_ids""", """attention_mask""", """output_0""", """output_1"""]
__snake_case: Dict = infer_shapes(A , A )
# Assert all variables are present
self.assertEqual(len(A ) , len(A ) )
self.assertTrue(all(var_name in shapes for var_name in variable_names ) )
self.assertSequenceEqual(variable_names[:3] , A )
self.assertSequenceEqual(variable_names[3:] , A )
# Assert inputs are {0: batch, 1: sequence}
for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
self.assertDictEqual(shapes[var_name] , {0: """batch""", 1: """sequence"""} )
# Assert outputs are {0: batch, 1: sequence} and {0: batch}
self.assertDictEqual(shapes["""output_0"""] , {0: """batch""", 1: """sequence"""} )
self.assertDictEqual(shapes["""output_1"""] , {0: """batch"""} )
def UpperCAmelCase__ ( self : int ):
__snake_case: Dict = ["""input_ids""", """attention_mask""", """token_type_ids"""]
__snake_case: Union[str, Any] = {"""input_ids""": [1, 2, 3, 4], """attention_mask""": [0, 0, 0, 0], """token_type_ids""": [1, 1, 1, 1]}
__snake_case: int = ensure_valid_input(FuncContiguousArgs() , A , A )
# Should have exactly the same number of args (all are valid)
self.assertEqual(len(A ) , 3 )
# Should have exactly the same input names
self.assertEqual(set(A ) , set(A ) )
# Parameter should be reordered according to their respective place in the function:
# (input_ids, token_type_ids, attention_mask)
self.assertEqual(A , (tokens["""input_ids"""], tokens["""token_type_ids"""], tokens["""attention_mask"""]) )
# Generated args are interleaved with another args (for instance parameter "past" in GPT2)
__snake_case: Any = ensure_valid_input(FuncNonContiguousArgs() , A , A )
# Should have exactly the one arg (all before the one not provided "some_other_args")
self.assertEqual(len(A ) , 1 )
self.assertEqual(len(A ) , 1 )
# Should have only "input_ids"
self.assertEqual(inputs_args[0] , tokens["""input_ids"""] )
self.assertEqual(ordered_input_names[0] , """input_ids""" )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Any = generate_identified_filename(Path("""/home/something/my_fake_model.onnx""" ) , """-test""" )
self.assertEqual("""/home/something/my_fake_model-test.onnx""" , generated.as_posix() )
| 363
|
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__UpperCAmelCase : Any = random.Random()
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None) -> Any:
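    # Returns a shape[0] x shape[1] nested list of random floats drawn from `rng`
    # (the module-level global_rng when none is given), each scaled by `scale`.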
if rng is None:
__snake_case: Dict = global_rng
__snake_case: str = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , A : List[str] , A : List[Any]=7 , A : Optional[int]=400 , A : List[Any]=2_000 , A : Dict=2_048 , A : Tuple=128 , A : List[Any]=1 , A : Tuple=512 , A : str=30 , A : Optional[Any]=44_100 , ):
__snake_case: Dict = parent
__snake_case: Optional[Any] = batch_size
__snake_case: Optional[int] = min_seq_length
__snake_case: Optional[Any] = max_seq_length
__snake_case: List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__snake_case: Any = spectrogram_length
__snake_case: Any = feature_size
__snake_case: Union[str, Any] = num_audio_channels
__snake_case: Any = hop_length
__snake_case: List[str] = chunk_length
__snake_case: Any = sampling_rate
def UpperCAmelCase__ ( self : List[Any] ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCAmelCase__ ( self : List[str] , A : str=False , A : int=False ):
def _flatten(A : Dict ):
return list(itertools.chain(*A ) )
if equal_length:
__snake_case: List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__snake_case: int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__snake_case: Tuple = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = TvltFeatureExtractor
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: str = TvltFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : int ):
__snake_case: Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A , """spectrogram_length""" ) )
self.assertTrue(hasattr(A , """feature_size""" ) )
self.assertTrue(hasattr(A , """num_audio_channels""" ) )
self.assertTrue(hasattr(A , """hop_length""" ) )
self.assertTrue(hasattr(A , """chunk_length""" ) )
self.assertTrue(hasattr(A , """sampling_rate""" ) )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: Tuple = feat_extract_first.save_pretrained(A )[0]
check_json_file_has_correct_format(A )
__snake_case: int = self.feature_extraction_class.from_pretrained(A )
__snake_case: List[str] = feat_extract_first.to_dict()
__snake_case: str = feat_extract_second.to_dict()
__snake_case: List[Any] = dict_first.pop("""mel_filters""" )
__snake_case: str = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: str = os.path.join(A , """feat_extract.json""" )
feat_extract_first.to_json_file(A )
__snake_case: List[Any] = self.feature_extraction_class.from_json_file(A )
__snake_case: Dict = feat_extract_first.to_dict()
__snake_case: Any = feat_extract_second.to_dict()
__snake_case: int = dict_first.pop("""mel_filters""" )
__snake_case: int = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Any ):
# Initialize feature_extractor
__snake_case: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__snake_case: Dict = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__snake_case: str = [np.asarray(A ) for speech_input in speech_inputs]
# Test not batched input
__snake_case: int = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__snake_case: Optional[int] = feature_extractor(A , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__snake_case: Union[str, Any] = feature_extractor(
A , return_tensors="""np""" , sampling_rate=44_100 , mask_audio=A ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__snake_case: Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__snake_case: Union[str, Any] = np.asarray(A )
__snake_case: List[Any] = feature_extractor(A , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCAmelCase__ ( self : Union[str, Any] , A : List[str] ):
__snake_case: Tuple = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__snake_case: List[Any] = ds.sort("""id""" ).select(range(A ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = self._load_datasamples(1 )
__snake_case: Optional[int] = TvltFeatureExtractor()
__snake_case: Optional[Any] = feature_extractor(A , return_tensors="""pt""" ).audio_values
        self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
__snake_case: str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , A , atol=1E-4 ) )
| 293
| 0
|
"""simple docstring"""
def A__ ( SCREAMING_SNAKE_CASE__) -> bool:
return number & 1 == 0
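# A brief usage note: (number & 1) == 0 tests the lowest bit, so A__(4) is True,
# A__(7) is False, and the check also holds for negative even ints such as A__(-2).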
if __name__ == "__main__":
import doctest
doctest.testmod()
| 364
|
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
__UpperCAmelCase : Dict = logging.get_logger(__name__) # pylint: disable=invalid-name
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
    def __init__( self : List[Any] , A : AutoencoderKL , A : CLIPTextModel , A : CLIPTokenizer , A : UNet2DConditionModel , A : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , A : StableDiffusionSafetyChecker , A : CLIPImageProcessor , ):
super().__init__()
self.register_modules(
vae=A , text_encoder=A , tokenizer=A , unet=A , scheduler=A , safety_checker=A , feature_extractor=A , )
def UpperCAmelCase__ ( self : Optional[Any] , A : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__snake_case: Tuple = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(A )
def UpperCAmelCase__ ( self : str ):
self.enable_attention_slicing(A )
@torch.no_grad()
def __call__( self : List[str] , A : Union[str, List[str]] , A : int = 512 , A : int = 512 , A : int = 50 , A : float = 7.5 , A : Optional[Union[str, List[str]]] = None , A : Optional[int] = 1 , A : float = 0.0 , A : Optional[torch.Generator] = None , A : Optional[torch.FloatTensor] = None , A : Optional[str] = "pil" , A : bool = True , A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , A : int = 1 , A : Optional[torch.FloatTensor] = None , **A : Optional[Any] , ):
if isinstance(A , A ):
__snake_case: int = 1
elif isinstance(A , A ):
__snake_case: Optional[Any] = len(A )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(A )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(A , A ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(A )}.''' )
# get prompt text embeddings
__snake_case: Tuple = self.tokenizer(
A , padding="""max_length""" , max_length=self.tokenizer.model_max_length , return_tensors="""pt""" , )
__snake_case: Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case: List[str] = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
__snake_case: Dict = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__snake_case: Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case: List[Any] = text_embeddings.shape
__snake_case: Tuple = text_embeddings.repeat(1 , A , 1 )
__snake_case: Dict = text_embeddings.view(bs_embed * num_images_per_prompt , A , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case: List[str] = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case: List[str]
if negative_prompt is None:
__snake_case: Any = [""""""]
elif type(A ) is not type(A ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(A )} !='''
f''' {type(A )}.''' )
elif isinstance(A , A ):
__snake_case: List[str] = [negative_prompt]
elif batch_size != len(A ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(A )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
""" the batch size of `prompt`.""" )
else:
__snake_case: str = negative_prompt
__snake_case: Any = text_input_ids.shape[-1]
__snake_case: Dict = self.tokenizer(
A , padding="""max_length""" , max_length=A , truncation=A , return_tensors="""pt""" , )
__snake_case: Tuple = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case: Optional[Any] = uncond_embeddings.shape[1]
__snake_case: str = uncond_embeddings.repeat(A , A , 1 )
__snake_case: List[Any] = uncond_embeddings.view(batch_size * num_images_per_prompt , A , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case: Any = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case: Tuple = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case: List[Any] = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__snake_case: Optional[Any] = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case: Any = torch.randn(
A , generator=A , device="""cpu""" , dtype=A ).to(self.device )
__snake_case: Tuple = torch.randn(A , generator=A , device="""cpu""" , dtype=A ).to(
self.device )
else:
__snake_case: Dict = torch.randn(
A , generator=A , device=self.device , dtype=A )
__snake_case: Optional[int] = torch.randn(A , generator=A , device=self.device , dtype=A )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
__snake_case: Optional[int] = latents_reference.to(self.device )
__snake_case: List[str] = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__snake_case: int = (latents_shape[3] - latents_shape_reference[3]) // 2
__snake_case: Optional[int] = (latents_shape[2] - latents_shape_reference[2]) // 2
__snake_case: int = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__snake_case: Dict = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__snake_case: List[Any] = 0 if dx < 0 else dx
__snake_case: Dict = 0 if dy < 0 else dy
__snake_case: List[str] = max(-dx , 0 )
__snake_case: int = max(-dy , 0 )
# import pdb
# pdb.set_trace()
__snake_case: List[Any] = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(A )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case: str = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__snake_case: Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case: Optional[int] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__snake_case: int = {}
if accepts_eta:
__snake_case: Optional[Any] = eta
for i, t in enumerate(self.progress_bar(A ) ):
# expand the latents if we are doing classifier free guidance
__snake_case: str = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__snake_case: Dict = self.scheduler.scale_model_input(A , A )
# predict the noise residual
__snake_case: List[Any] = self.unet(A , A , encoder_hidden_states=A ).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case: Any = noise_pred.chunk(2 )
__snake_case: Optional[int] = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case: str = self.scheduler.step(A , A , A , **A ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(A , A , A )
__snake_case: Optional[int] = 1 / 0.1_8215 * latents
__snake_case: List[Any] = self.vae.decode(A ).sample
__snake_case: str = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case: Any = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
__snake_case: List[Any] = self.feature_extractor(self.numpy_to_pil(A ) , return_tensors="""pt""" ).to(
self.device )
__snake_case , __snake_case: List[str] = self.safety_checker(
images=A , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__snake_case: Optional[int] = None
if output_type == "pil":
__snake_case: Tuple = self.numpy_to_pil(A )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=A , nsfw_content_detected=A )
| 293
| 0
|
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__UpperCAmelCase : str = {"tokenization_wav2vec2_phoneme": ["Wav2Vec2PhonemeCTCTokenizer"]}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
__UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 365
|
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
__UpperCAmelCase : Optional[int] = "\\n\n"
__UpperCAmelCase : Tuple = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
__UpperCAmelCase : Tuple = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to 'cuda' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id='gpt2',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!='']\n >>> results = perplexity.compute(model_id='gpt2',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n ['perplexities', 'mean_perplexity']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Tuple ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
def UpperCAmelCase__ ( self : int , A : str , A : Optional[Any] , A : int = 16 , A : bool = True , A : Optional[int]=None ):
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
__snake_case: Optional[Any] = """cuda"""
else:
__snake_case: str = """cuda""" if torch.cuda.is_available() else """cpu"""
__snake_case: Dict = AutoModelForCausalLM.from_pretrained(A )
__snake_case: List[str] = model.to(A )
__snake_case: Optional[Any] = AutoTokenizer.from_pretrained(A )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
__snake_case: Dict = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(A ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
__snake_case: Tuple = model.config.max_length - 1
else:
__snake_case: Optional[Any] = model.config.max_length
__snake_case: Optional[int] = tokenizer(
A , add_special_tokens=A , padding=A , truncation=A , max_length=A , return_tensors="""pt""" , return_attention_mask=A , ).to(A )
__snake_case: Tuple = encodings["""input_ids"""]
__snake_case: Any = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
__snake_case: Optional[int] = []
__snake_case: Optional[int] = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0 , len(A ) , A ) ):
__snake_case: Dict = min(start_index + batch_size , len(A ) )
__snake_case: Optional[int] = encoded_texts[start_index:end_index]
__snake_case: List[Any] = attn_masks[start_index:end_index]
if add_start_token:
__snake_case: Tuple = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(A )
__snake_case: Optional[Any] = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
__snake_case: Union[str, Any] = torch.cat(
                [torch.ones(bos_tokens_tensor.size() , dtype=torch.int64 ).to(A ), attn_mask] , dim=1 )
__snake_case: List[str] = encoded_batch
with torch.no_grad():
__snake_case: Union[str, Any] = model(A , attention_mask=A ).logits
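            # Shift so that tokens < n predict token n, then mask padding out of the loss.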
__snake_case: List[str] = out_logits[..., :-1, :].contiguous()
__snake_case: Optional[Any] = labels[..., 1:].contiguous()
__snake_case: Dict = attn_mask[..., 1:].contiguous()
            __snake_case: Optional[Any] = torch.exp(
(loss_fct(shift_logits.transpose(1 , 2 ) , A ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(A )}
| 293
| 0
|
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
__UpperCAmelCase : Optional[int] = logging.get_logger(__name__)
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> None:
__snake_case: Optional[int] = nn.ModuleList([src_layers[i] for i in layers_to_copy])
assert len(SCREAMING_SNAKE_CASE__) == len(SCREAMING_SNAKE_CASE__), F'''{len(SCREAMING_SNAKE_CASE__)} != {len(SCREAMING_SNAKE_CASE__)}'''
dest_layers.load_state_dict(layers_to_copy.state_dict())
__UpperCAmelCase : Optional[Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
__UpperCAmelCase : int = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
try:
__snake_case: List[str] = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
F''' {n_student}''')
return list(range(SCREAMING_SNAKE_CASE__))
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[int]:
if n_student > n_teacher:
raise ValueError(F'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''')
elif n_teacher == n_student:
return list(range(SCREAMING_SNAKE_CASE__))
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
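# For example, with the tables above a 12-layer teacher supervising a 3-layer
# student yields LAYERS_TO_SUPERVISE[12][3] == [3, 7, 11], while
# n_student == n_teacher simply returns list(range(n_teacher)).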
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = "student" , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , **SCREAMING_SNAKE_CASE__ , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
__snake_case: Optional[int] = """encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."""
assert (e is not None) or (d is not None), _msg
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__).save_pretrained(SCREAMING_SNAKE_CASE__) # purely for convenience
        __snake_case: str = AutoModelForSeq2SeqLM.from_pretrained(SCREAMING_SNAKE_CASE__).eval()
else:
assert isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__), F'''teacher must be a model or string got type {type(SCREAMING_SNAKE_CASE__)}'''
__snake_case: Union[str, Any] = teacher.config.to_diff_dict()
try:
__snake_case: Dict = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
__snake_case: Optional[Any] = teacher_e
if d is None:
__snake_case: List[Any] = teacher_d
init_kwargs.update({"""encoder_layers""": e, """decoder_layers""": d})
except AttributeError: # T5
if hasattr(teacher.config , """num_encoder_layers"""):
__snake_case: List[Any] = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
__snake_case: Any = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
__snake_case: int = teacher_e
if d is None:
__snake_case: Union[str, Any] = teacher_d
if hasattr(teacher.config , """num_encoder_layers"""):
init_kwargs.update({"""num_encoder_layers""": e, """num_decoder_layers""": d})
else:
init_kwargs.update({"""num_layers""": e, """num_decoder_layers""": d})
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(SCREAMING_SNAKE_CASE__)
# Copy weights
__snake_case: Optional[int] = teacher.config_class(**SCREAMING_SNAKE_CASE__)
    __snake_case: Optional[Any] = AutoModelForSeq2SeqLM.from_config(SCREAMING_SNAKE_CASE__)
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
__snake_case: Optional[int] = student.load_state_dict(teacher.state_dict() , strict=SCREAMING_SNAKE_CASE__)
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
__snake_case: int = list(range(SCREAMING_SNAKE_CASE__)), list(range(SCREAMING_SNAKE_CASE__))
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
F''' {save_path}''')
student.save_pretrained(SCREAMING_SNAKE_CASE__)
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
__snake_case: List[int] = pick_layers_to_copy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if d_layers_to_copy is None:
__snake_case: List[int] = pick_layers_to_copy(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
try:
if hasattr(
SCREAMING_SNAKE_CASE__ , """prophetnet"""): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , SCREAMING_SNAKE_CASE__)
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , SCREAMING_SNAKE_CASE__)
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , SCREAMING_SNAKE_CASE__)
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , SCREAMING_SNAKE_CASE__)
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , SCREAMING_SNAKE_CASE__)
copy_layers(teacher.decoder.block , student.decoder.block , SCREAMING_SNAKE_CASE__)
logger.info(
F'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''')
__snake_case: List[str] = {
"""teacher_type""": teacher.config.model_type,
"""copied_encoder_layers""": e_layers_to_copy,
"""copied_decoder_layers""": d_layers_to_copy,
}
student.save_pretrained(SCREAMING_SNAKE_CASE__)
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 366
|
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaOnnxConfig"],
"tokenization_roberta": ["RobertaTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = ["RobertaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta"] = [
"ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaForCausalLM",
"RobertaForMaskedLM",
"RobertaForMultipleChoice",
"RobertaForQuestionAnswering",
"RobertaForSequenceClassification",
"RobertaForTokenClassification",
"RobertaModel",
"RobertaPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta"] = [
"TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaForCausalLM",
"TFRobertaForMaskedLM",
"TFRobertaForMultipleChoice",
"TFRobertaForQuestionAnswering",
"TFRobertaForSequenceClassification",
"TFRobertaForTokenClassification",
"TFRobertaMainLayer",
"TFRobertaModel",
"TFRobertaPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta"] = [
"FlaxRobertaForCausalLM",
"FlaxRobertaForMaskedLM",
"FlaxRobertaForMultipleChoice",
"FlaxRobertaForQuestionAnswering",
"FlaxRobertaForSequenceClassification",
"FlaxRobertaForTokenClassification",
"FlaxRobertaModel",
"FlaxRobertaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 293
| 0
|
from statistics import mean
import numpy as np
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> list:
__snake_case: Union[str, Any] = 0
# Number of processes finished
__snake_case: Tuple = 0
# Displays the finished process.
# If it is 0, the performance is completed if it is 1, before the performance.
__snake_case: str = [0] * no_of_process
# List to include calculation results
__snake_case: str = [0] * no_of_process
# Sort by arrival time.
__snake_case: str = [burst_time[i] for i in np.argsort(SCREAMING_SNAKE_CASE__)]
__snake_case: List[Any] = [process_name[i] for i in np.argsort(SCREAMING_SNAKE_CASE__)]
arrival_time.sort()
while no_of_process > finished_process_count:
__snake_case: str = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
__snake_case: str = arrival_time[i]
__snake_case: List[Any] = 0
# Index showing the location of the process being performed
__snake_case: Optional[int] = 0
# Saves the current response ratio.
__snake_case: Optional[int] = 0
for i in range(0 , SCREAMING_SNAKE_CASE__):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
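                # Highest Response Ratio Next: ratio = (waiting time + burst time) / burst time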
__snake_case: int = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
__snake_case: int = temp
__snake_case: str = i
# Calculate the turn around time
__snake_case: Tuple = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
__snake_case: Any = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> list:
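    # Waiting time is the turn-around time minus the burst (service) time of each process.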
__snake_case: int = [0] * no_of_process
for i in range(0 , SCREAMING_SNAKE_CASE__):
__snake_case: List[Any] = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
__UpperCAmelCase : Optional[int] = 5
__UpperCAmelCase : Tuple = ["A", "B", "C", "D", "E"]
__UpperCAmelCase : int = [1, 2, 3, 4, 5]
__UpperCAmelCase : Optional[Any] = [1, 2, 3, 4, 5]
__UpperCAmelCase : Dict = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
__UpperCAmelCase : str = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
f'{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'
f'{turn_around_time[i]}\t\t\t{waiting_time[i]}'
)
print(f'average waiting time : {mean(waiting_time):.5f}')
print(f'average turn around time : {mean(turn_around_time):.5f}')
| 367
|
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[int] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(A , """hidden_sizes""" ) )
self.parent.assertTrue(hasattr(A , """neck_hidden_sizes""" ) )
self.parent.assertTrue(hasattr(A , """num_attention_heads""" ) )
class __snake_case :
'''simple docstring'''
def __init__( self : int , A : str , A : Dict=13 , A : str=32 , A : Any=2 , A : Optional[Any]=3 , A : str=640 , A : Tuple=4 , A : Dict="silu" , A : List[Any]=3 , A : Any=32 , A : Any=0.1 , A : int=0.1 , A : Dict=0.1 , A : Optional[Any]=0.02 , A : List[Any]=True , A : Tuple=True , A : Any=10 , A : Optional[int]=None , ):
__snake_case: List[Any] = parent
__snake_case: Dict = batch_size
__snake_case: int = image_size
__snake_case: Tuple = patch_size
__snake_case: Tuple = num_channels
__snake_case: str = last_hidden_size
__snake_case: Dict = num_attention_heads
__snake_case: Dict = hidden_act
__snake_case: Tuple = conv_kernel_size
__snake_case: List[str] = output_stride
__snake_case: List[str] = hidden_dropout_prob
__snake_case: Optional[Any] = attention_probs_dropout_prob
__snake_case: int = classifier_dropout_prob
__snake_case: List[Any] = use_labels
__snake_case: Union[str, Any] = is_training
__snake_case: Union[str, Any] = num_labels
__snake_case: str = initializer_range
__snake_case: List[Any] = scope
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__snake_case: Tuple = None
__snake_case: Any = None
if self.use_labels:
__snake_case: Union[str, Any] = ids_tensor([self.batch_size] , self.num_labels )
__snake_case: str = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
__snake_case: Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def UpperCAmelCase__ ( self : int ):
return MobileViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : str , A : Optional[Any] , A : Any , A : Any , A : Union[str, Any] ):
__snake_case: List[Any] = MobileViTModel(config=A )
model.to(A )
model.eval()
__snake_case: int = model(A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : str , A : List[Any] , A : Any , A : Any , A : int ):
__snake_case: str = self.num_labels
__snake_case: Optional[int] = MobileViTForImageClassification(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] , A : str , A : Optional[Any] , A : int , A : str ):
__snake_case: List[Any] = self.num_labels
__snake_case: Dict = MobileViTForSemanticSegmentation(A )
model.to(A )
model.eval()
__snake_case: Union[str, Any] = model(A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
__snake_case: Tuple = model(A , labels=A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Tuple = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case: Any = config_and_inputs
__snake_case: Optional[int] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (
(MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": MobileViTModel,
"""image-classification""": MobileViTForImageClassification,
"""image-segmentation""": MobileViTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[Any] = MobileViTModelTester(self )
__snake_case: str = MobileViTConfigTester(self , config_class=A , has_text_modality=A )
def UpperCAmelCase__ ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : List[Any] ):
pass
@unittest.skip(reason="""MobileViT does not support input and output embeddings""" )
def UpperCAmelCase__ ( self : Dict ):
pass
@unittest.skip(reason="""MobileViT does not output attentions""" )
def UpperCAmelCase__ ( self : Optional[Any] ):
pass
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case: Optional[Any] = model_class(A )
__snake_case: int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__snake_case: Optional[int] = [*signature.parameters.keys()]
__snake_case: List[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase__ ( self : Optional[int] ):
pass
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase__ ( self : Dict ):
def check_hidden_states_output(A : List[Any] , A : int , A : Tuple ):
__snake_case: List[str] = model_class(A )
model.to(A )
model.eval()
with torch.no_grad():
__snake_case: str = model(**self._prepare_for_class(A , A ) )
__snake_case: Optional[int] = outputs.hidden_states
__snake_case: Any = 5
self.assertEqual(len(A ) , A )
# MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
__snake_case: Union[str, Any] = 2
for i in range(len(A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
__snake_case , __snake_case: List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__snake_case: Optional[Any] = True
check_hidden_states_output(A , A , A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__snake_case: Dict = True
check_hidden_states_output(A , A , A )
def UpperCAmelCase__ ( self : int ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*A )
@slow
def UpperCAmelCase__ ( self : Union[str, Any] ):
for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case: List[Any] = MobileViTModel.from_pretrained(A )
self.assertIsNotNone(A )
def A__ ( ) -> Optional[int]:
__snake_case: Optional[Any] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
@require_vision
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCAmelCase__ ( self : Dict ):
return MobileViTImageProcessor.from_pretrained("""apple/mobilevit-xx-small""" ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Tuple = MobileViTForImageClassification.from_pretrained("""apple/mobilevit-xx-small""" ).to(A )
__snake_case: str = self.default_image_processor
__snake_case: Optional[Any] = prepare_img()
__snake_case: List[Any] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: Dict = model(**A )
# verify the logits
__snake_case: List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , A )
__snake_case: Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653] ).to(A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[str] = model.to(A )
__snake_case: Dict = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[Any] = prepare_img()
__snake_case: List[str] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: List[Any] = model(**A )
__snake_case: Optional[int] = outputs.logits
# verify the logits
__snake_case: Dict = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape , A )
__snake_case: Optional[int] = torch.tensor(
[
[[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
[[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
[[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
] , device=A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A , atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Dict ):
__snake_case: int = MobileViTForSemanticSegmentation.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: str = model.to(A )
__snake_case: Optional[Any] = MobileViTImageProcessor.from_pretrained("""apple/deeplabv3-mobilevit-xx-small""" )
__snake_case: List[str] = prepare_img()
__snake_case: Optional[int] = image_processor(images=A , return_tensors="""pt""" ).to(A )
# forward pass
with torch.no_grad():
__snake_case: Dict = model(**A )
__snake_case: List[Any] = outputs.logits.detach().cpu()
__snake_case: List[str] = image_processor.post_process_semantic_segmentation(outputs=A , target_sizes=[(50, 60)] )
__snake_case: str = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape , A )
__snake_case: int = image_processor.post_process_semantic_segmentation(outputs=A )
__snake_case: Tuple = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape , A )
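# --- Standalone sanity sketch of the hidden-states shape check above ---
# MobileViT halves the spatial resolution at each of the five stages the test
# walks through, so with the tester's image_size=32 the feature maps shrink
# 16 -> 8 -> 4 -> 2 -> 1, and the final divisor equals output_stride=32.
image_size, num_stages = 32, 5
feature_map_sizes = [image_size // (2 ** (stage + 1)) for stage in range(num_stages)]
assert feature_map_sizes == [16, 8, 4, 2, 1]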
| 293
| 0
|
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_mobilebert import MobileBertTokenizer
__UpperCAmelCase : Tuple = logging.get_logger(__name__)
__UpperCAmelCase : Tuple = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase : List[str] = {
"vocab_file": {"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/vocab.txt"},
"tokenizer_file": {
"mobilebert-uncased": "https://huggingface.co/google/mobilebert-uncased/resolve/main/tokenizer.json"
},
}
__UpperCAmelCase : List[Any] = {"mobilebert-uncased": 512}
__UpperCAmelCase : List[str] = {}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = MobileBertTokenizer
def __init__( self : Optional[int] , A : Tuple=None , A : List[str]=None , A : List[Any]=True , A : Dict="[UNK]" , A : Any="[SEP]" , A : Optional[Any]="[PAD]" , A : Tuple="[CLS]" , A : List[Any]="[MASK]" , A : Optional[int]=True , A : Optional[int]=None , **A : Optional[Any] , ):
super().__init__(
A , tokenizer_file=A , do_lower_case=A , unk_token=A , sep_token=A , pad_token=A , cls_token=A , mask_token=A , tokenize_chinese_chars=A , strip_accents=A , **A , )
__snake_case: Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , A ) != do_lower_case
or normalizer_state.get("""strip_accents""" , A ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , A ) != tokenize_chinese_chars
):
__snake_case: str = getattr(A , normalizer_state.pop("""type""" ) )
__snake_case: str = do_lower_case
__snake_case: Any = strip_accents
__snake_case: Tuple = tokenize_chinese_chars
__snake_case: Tuple = normalizer_class(**A )
__snake_case: Dict = do_lower_case
def UpperCAmelCase__ ( self : Tuple , A : int , A : Optional[Any]=None ):
__snake_case: Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def UpperCAmelCase__ ( self : Dict , A : List[int] , A : Optional[List[int]] = None ):
__snake_case: Tuple = [self.sep_token_id]
__snake_case: Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__ ( self : Dict , A : str , A : Optional[str] = None ):
__snake_case: List[str] = self._tokenizer.model.save(A , name=A )
return tuple(A )
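# --- Illustrative sketch (hypothetical token ids, not from a real vocab) ---
# create_token_type_ids_from_sequences above marks sentence A (with [CLS] and
# [SEP]) as segment 0 and sentence B (with its trailing [SEP]) as segment 1:
cls_id, sep_id = 101, 102
token_ids_a, token_ids_b = [7, 8], [9]
type_ids = [0] * len([cls_id] + token_ids_a + [sep_id]) + [1] * len(token_ids_b + [sep_id])
assert type_ids == [0, 0, 0, 0, 1, 1]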
| 368
|
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: str = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetDownsampleBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Union[str, Any] = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : Any ):
__snake_case: Union[str, Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
def UpperCAmelCase__ ( self : List[str] ):
__snake_case , __snake_case: List[str] = super().prepare_init_args_and_inputs_for_common()
__snake_case: List[Any] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : int ):
__snake_case , __snake_case: Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[Any] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Optional[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Any ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipDownBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[Any] ):
return super().get_dummy_input(include_skip_sample=A )
def UpperCAmelCase__ ( self : int ):
__snake_case: str = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: str = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnDownEncoderBlockaD # noqa F405
lowerCAmelCase__ = """down"""
@property
def UpperCAmelCase__ ( self : List[str] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Optional[Any] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
__snake_case: Tuple = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaD # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case: Optional[int] = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
__snake_case: List[str] = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : str ):
__snake_case: Tuple = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: int = super().prepare_init_args_and_inputs_for_common()
__snake_case: int = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UNetMidBlockaDSimpleCrossAttn # noqa F405
lowerCAmelCase__ = """mid"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : str ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = ResnetUpsampleBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Tuple ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = CrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Any = super().prepare_init_args_and_inputs_for_common()
__snake_case: Optional[int] = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: List[Any] = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SimpleCrossAttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_res_hidden_states_tuple=A , include_encoder_hidden_states=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case , __snake_case: Optional[Any] = super().prepare_init_args_and_inputs_for_common()
__snake_case: str = 32
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : int ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[Any] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = SkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnSkipUpBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : str ):
return super().get_dummy_input(include_res_hidden_states_tuple=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = UpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[int] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : str ):
__snake_case: Union[str, Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Dict = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(A )
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = AttnUpDecoderBlockaD # noqa F405
lowerCAmelCase__ = """up"""
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
return super().get_dummy_input(include_temb=A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = {"""in_channels""": 32, """out_channels""": 32}
__snake_case: Any = self.dummy_input
return init_dict, inputs_dict
def UpperCAmelCase__ ( self : int ):
__snake_case: Any = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(A )
| 293
| 0
|
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = (DDPMScheduler,)
def UpperCAmelCase__ ( self : Optional[Any] , **A : Tuple ):
__snake_case: Union[str, Any] = {
"""num_train_timesteps""": 1_000,
"""beta_start""": 0.0001,
"""beta_end""": 0.02,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**A )
return config
def UpperCAmelCase__ ( self : Dict ):
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=A )
def UpperCAmelCase__ ( self : Any ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=A , beta_end=A )
def UpperCAmelCase__ ( self : Optional[int] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A )
def UpperCAmelCase__ ( self : str ):
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A )
def UpperCAmelCase__ ( self : int ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A )
def UpperCAmelCase__ ( self : List[str] ):
self.check_over_configs(thresholding=A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A , prediction_type=A , sample_max_value=A , )
def UpperCAmelCase__ ( self : Any ):
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A )
def UpperCAmelCase__ ( self : Any ):
for t in [0, 500, 999]:
self.check_over_forward(time_step=A )
def UpperCAmelCase__ ( self : int ):
__snake_case: Tuple = self.scheduler_classes[0]
__snake_case: Optional[Any] = self.get_scheduler_config()
__snake_case: Optional[Any] = scheduler_class(**A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = self.scheduler_classes[0]
__snake_case: str = self.get_scheduler_config()
__snake_case: Dict = scheduler_class(**A )
__snake_case: str = len(A )
__snake_case: Dict = self.dummy_model()
__snake_case: Tuple = self.dummy_sample_deter
__snake_case: Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(A ) ):
# 1. predict noise residual
__snake_case: str = model(A , A )
# 2. predict previous mean of sample x_t-1
__snake_case: List[Any] = scheduler.step(A , A , A , generator=A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__snake_case: Optional[int] = pred_prev_sample
__snake_case: List[Any] = torch.sum(torch.abs(A ) )
__snake_case: str = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Dict = self.scheduler_classes[0]
__snake_case: Dict = self.get_scheduler_config(prediction_type="""v_prediction""" )
__snake_case: List[str] = scheduler_class(**A )
__snake_case: List[str] = len(A )
__snake_case: Tuple = self.dummy_model()
__snake_case: Dict = self.dummy_sample_deter
__snake_case: Optional[int] = torch.manual_seed(0 )
for t in reversed(range(A ) ):
# 1. predict noise residual
__snake_case: Any = model(A , A )
# 2. predict previous mean of sample x_t-1
__snake_case: int = scheduler.step(A , A , A , generator=A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__snake_case: str = pred_prev_sample
__snake_case: int = torch.sum(torch.abs(A ) )
__snake_case: Optional[Any] = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: List[Any] = self.scheduler_classes[0]
__snake_case: Optional[Any] = self.get_scheduler_config()
__snake_case: str = scheduler_class(**A )
__snake_case: List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=A )
__snake_case: List[str] = scheduler.timesteps
for i, timestep in enumerate(A ):
if i == len(A ) - 1:
__snake_case: Dict = -1
else:
__snake_case: Optional[Any] = timesteps[i + 1]
__snake_case: Union[str, Any] = scheduler.previous_timestep(A )
__snake_case: Tuple = prev_t.item()
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = self.scheduler_classes[0]
__snake_case: Optional[int] = self.get_scheduler_config()
__snake_case: str = scheduler_class(**A )
__snake_case: Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(A , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Any = self.scheduler_classes[0]
__snake_case: str = self.get_scheduler_config()
__snake_case: str = scheduler_class(**A )
__snake_case: List[Any] = [100, 87, 50, 1, 0]
__snake_case: List[str] = len(A )
with self.assertRaises(A , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=A , timesteps=A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Tuple = self.scheduler_classes[0]
__snake_case: List[str] = self.get_scheduler_config()
__snake_case: List[str] = scheduler_class(**A )
__snake_case: Dict = [scheduler.config.num_train_timesteps]
with self.assertRaises(
A , msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
scheduler.set_timesteps(timesteps=A )
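# --- Standalone sketch of the variance values asserted above ---
# Assuming the linear beta schedule from the config (beta_start=0.0001,
# beta_end=0.02, 1000 steps), DDPM's "fixed_small" posterior variance at step
# t is beta_t * (1 - alphabar_{t-1}) / (1 - alphabar_t); at t=0 it is 0.
import torch
betas = torch.linspace(0.0001, 0.02, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
def _variance(t: int) -> float:
prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
return float(betas[t] * (1 - prev) / (1 - alphas_cumprod[t]))
assert _variance(0) == 0.0
assert abs(_variance(487) - 0.00979) < 1e-4
assert abs(_variance(999) - 0.02) < 1e-4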
| 369
|
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_onnxruntime
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@property
def UpperCAmelCase__ ( self : Dict ):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[int] = ort.SessionOptions()
__snake_case: List[Any] = False
return options
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__snake_case: Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__snake_case: List[str] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
__snake_case: int = """A red cat sitting on a park bench"""
__snake_case: Any = np.random.RandomState(0 )
__snake_case: Optional[Any] = pipe(
prompt=A , image=A , mask_image=A , guidance_scale=7.5 , num_inference_steps=10 , generator=A , output_type="""np""" , )
__snake_case: List[Any] = output.images
__snake_case: str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__snake_case: Any = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: Optional[Any] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo.png""" )
__snake_case: Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/in_paint/overture-creations-5sI6fQgYIuo_mask.png""" )
__snake_case: Optional[int] = LMSDiscreteScheduler.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , subfolder="""scheduler""" , revision="""onnx""" )
__snake_case: List[Any] = OnnxStableDiffusionInpaintPipeline.from_pretrained(
"""runwayml/stable-diffusion-inpainting""" , revision="""onnx""" , scheduler=A , safety_checker=A , feature_extractor=A , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A )
__snake_case: Optional[int] = """A red cat sitting on a park bench"""
__snake_case: Dict = np.random.RandomState(0 )
__snake_case: Optional[Any] = pipe(
prompt=A , image=A , mask_image=A , guidance_scale=7.5 , num_inference_steps=20 , generator=A , output_type="""np""" , )
__snake_case: List[str] = output.images
__snake_case: str = images[0, 255:258, 255:258, -1]
assert images.shape == (1, 512, 512, 3)
__snake_case: Union[str, Any] = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
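# --- Minimal sketch of the slice-comparison pattern used above (dummy data) ---
# Each test takes a fixed 3x3 patch of the generated image's last channel and
# checks it elementwise against reference values within a tolerance:
import numpy as np
image_slice = np.full((3, 3), 0.25)
expected_slice = np.full(9, 0.25)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3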
| 293
| 0
|
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__UpperCAmelCase : Any = logging.get_logger(__name__)
__UpperCAmelCase : List[str] = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
__UpperCAmelCase : List[str] = {
"vocab_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"},
"merges_file": {"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"},
"tokenizer_config_file": {
"facebook/blenderbot-3B": "https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"
},
}
__UpperCAmelCase : Union[str, Any] = {"facebook/blenderbot-3B": 128}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["""input_ids""", """attention_mask"""]
lowerCAmelCase__ = BlenderbotTokenizer
def __init__( self : Optional[Any] , A : int=None , A : Tuple=None , A : str=None , A : Any="replace" , A : Union[str, Any]="<s>" , A : List[Any]="</s>" , A : str="</s>" , A : Any="<s>" , A : Any="<unk>" , A : Any="<pad>" , A : Any="<mask>" , A : Optional[int]=False , A : Optional[Any]=True , **A : List[Any] , ):
super().__init__(
A , A , tokenizer_file=A , errors=A , bos_token=A , eos_token=A , sep_token=A , cls_token=A , unk_token=A , pad_token=A , mask_token=A , add_prefix_space=A , trim_offsets=A , **A , )
__snake_case: int = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , A ) != add_prefix_space:
__snake_case: Union[str, Any] = getattr(A , pre_tok_state.pop("""type""" ) )
__snake_case: Tuple = add_prefix_space
__snake_case: Dict = pre_tok_class(**A )
__snake_case: List[Any] = add_prefix_space
__snake_case: Any = """post_processor"""
__snake_case: int = getattr(self.backend_tokenizer , A , A )
if tokenizer_component_instance:
__snake_case: Union[str, Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
__snake_case: Union[str, Any] = tuple(state["""sep"""] )
if "cls" in state:
__snake_case: Dict = tuple(state["""cls"""] )
__snake_case: Dict = False
if state.get("""add_prefix_space""" , A ) != add_prefix_space:
__snake_case: str = add_prefix_space
__snake_case: Any = True
if state.get("""trim_offsets""" , A ) != trim_offsets:
__snake_case: Optional[Any] = trim_offsets
__snake_case: Any = True
if changes_to_apply:
__snake_case: Tuple = getattr(A , state.pop("""type""" ) )
__snake_case: Optional[int] = component_class(**A )
setattr(self.backend_tokenizer , A , A )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
def UpperCAmelCase__ ( self : Optional[Any] ):
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def UpperCAmelCase__ ( self : Any , A : Any ):
__snake_case: List[str] = AddedToken(A , lstrip=A , rstrip=A ) if isinstance(A , A ) else value
__snake_case: List[Any] = value
def UpperCAmelCase__ ( self : Tuple , *A : List[str] , **A : Dict ):
__snake_case: Dict = kwargs.get("""is_split_into_words""" , A )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*A , **A )
def UpperCAmelCase__ ( self : Tuple , *A : List[Any] , **A : Tuple ):
__snake_case: Optional[int] = kwargs.get("""is_split_into_words""" , A )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*A , **A )
def UpperCAmelCase__ ( self : Tuple , A : str , A : Optional[str] = None ):
__snake_case: Optional[Any] = self._tokenizer.model.save(A , name=A )
return tuple(A )
def UpperCAmelCase__ ( self : Any , A : List[int] , A : Optional[List[int]] = None ):
__snake_case: Union[str, Any] = [self.sep_token_id]
__snake_case: List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def UpperCAmelCase__ ( self : List[Any] , A : List[int] , A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def UpperCAmelCase__ ( self : Dict , A : "Conversation" ):
__snake_case: Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done inside Blenderbot
inputs.append(""" """ + text )
else:
# Generated responses should already contain the space.
inputs.append(A )
__snake_case: List[Any] = """ """.join(A )
__snake_case: int = self.encode(A )
if len(A ) > self.model_max_length:
__snake_case: str = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
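# --- Illustrative sketch (hypothetical conversation turns) ---
# _build_conversation_input_ids above space-prefixes user turns, joins all
# turns with spaces, then encodes (build_inputs_with_special_tokens appends eos):
turns = [(True, "hi"), (False, "hello!"), (True, "how are you?")]
joined = " ".join((" " + text) if is_user else text for is_user, text in turns)
assert joined == " hi hello!  how are you?"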
| 370
|
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def A__ ( SCREAMING_SNAKE_CASE__ = 3) -> qiskit.result.counts.Counts:
if not isinstance(SCREAMING_SNAKE_CASE__ , int):
raise TypeError("""number of qubits must be an integer.""")
if number_of_qubits <= 0:
raise ValueError("""number of qubits must be > 0.""")
if math.floor(SCREAMING_SNAKE_CASE__) != number_of_qubits:
raise ValueError("""number of qubits must be exact integer.""")
if number_of_qubits > 10:
raise ValueError("""number of qubits too large to simulate(>10).""")
__snake_case: int = QuantumRegister(SCREAMING_SNAKE_CASE__ , """qr""")
__snake_case: List[str] = ClassicalRegister(SCREAMING_SNAKE_CASE__ , """cr""")
__snake_case: Optional[Any] = QuantumCircuit(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
__snake_case: Tuple = number_of_qubits
for i in range(SCREAMING_SNAKE_CASE__):
quantum_circuit.h(number_of_qubits - i - 1)
counter -= 1
for j in range(SCREAMING_SNAKE_CASE__):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
for k in range(number_of_qubits // 2):
quantum_circuit.swap(SCREAMING_SNAKE_CASE__ , number_of_qubits - k - 1)
# measure all the qubits
quantum_circuit.measure(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# simulate with 10000 shots
__snake_case: Union[str, Any] = Aer.get_backend("""qasm_simulator""")
__snake_case: Optional[Any] = execute(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , shots=1_0000)
return job.result().get_counts(SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
print(
f'Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'
)
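# --- Usage sketch (requires qiskit with the Aer simulator) ---
# Starting from |000>, the QFT yields a uniform superposition, so over the
# 10000 shots above each of the 8 basis states appears roughly 1250 times.
# Commented out to avoid a hard qiskit dependency when importing this file:
#     counts = quantum_fourier_transform(3)
#     assert sum(counts.values()) == 10_000 and len(counts) == 8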
| 293
| 0
|
from ..utils import DummyObject, requires_backends
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["""torch""", """transformers""", """onnx"""]
def __init__( self : str , *A : List[Any] , **A : Any ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCAmelCase__ ( cls : Dict , *A : Optional[Any] , **A : Tuple ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCAmelCase__ ( cls : Dict , *A : Optional[int] , **A : Tuple ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["""torch""", """transformers""", """onnx"""]
def __init__( self : int , *A : List[Any] , **A : Tuple ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCAmelCase__ ( cls : str , *A : Tuple , **A : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , *A : Dict , **A : Union[str, Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["""torch""", """transformers""", """onnx"""]
def __init__( self : int , *A : Any , **A : str ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *A : Union[str, Any] , **A : Any ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , *A : Union[str, Any] , **A : Optional[int] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Tuple , *A : List[Any] , **A : str ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , *A : int , **A : Tuple ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] , *A : int , **A : int ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Optional[int] , *A : Dict , **A : Tuple ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , *A : Union[str, Any] , **A : int ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCAmelCase__ ( cls : str , *A : List[str] , **A : List[Any] ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
class __snake_case ( metaclass=__lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = ["""torch""", """transformers""", """onnx"""]
def __init__( self : Optional[int] , *A : Optional[Any] , **A : Any ):
requires_backends(self , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCAmelCase__ ( cls : Optional[int] , *A : int , **A : Dict ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
@classmethod
def UpperCAmelCase__ ( cls : Tuple , *A : Optional[Any] , **A : Tuple ):
requires_backends(cls , ["""torch""", """transformers""", """onnx"""] )
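# --- Simplified stand-in for the gating idea behind these dummies ---
# (an assumption-laden sketch, not the actual diffusers requires_backends):
# if any named backend is missing, instantiation fails immediately.
import importlib.util
def _requires_backends_sketch(obj, backends):
missing = [name for name in backends if importlib.util.find_spec(name) is None]
if missing:
raise ImportError(f"{type(obj).__name__} requires: {', '.join(missing)}")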
| 371
|
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline
| 293
| 0
|
import functools
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int:
__snake_case: str = len(SCREAMING_SNAKE_CASE__)
__snake_case: Tuple = len(SCREAMING_SNAKE_CASE__)
@functools.cache
def min_distance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> int:
# if the first word's index has run past its end, delete the remaining characters of the second word
if indexa >= len_worda:
return len_worda - indexa
# if the second word's index has run past its end, delete the remaining characters of the first word
if indexa >= len_worda:
return len_worda - indexa
__snake_case: Optional[Any] = int(worda[indexa] != worda[indexa]) # current letters not identical
return min(
1 + min_distance(indexa + 1 , SCREAMING_SNAKE_CASE__) , 1 + min_distance(SCREAMING_SNAKE_CASE__ , indexa + 1) , diff + min_distance(indexa + 1 , indexa + 1) , )
return min_distance(0 , 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
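# --- Readable, self-contained sketch of the same recursion ---
# (insertion, deletion and substitution each cost 1):
import functools
def edit_distance(a: str, b: str) -> int:
@functools.cache
def go(i: int, j: int) -> int:
if i == len(a):
return len(b) - j  # insert the rest of b
if j == len(b):
return len(a) - i  # delete the rest of a
diff = int(a[i] != b[j])  # substitution cost for the current letters
return min(1 + go(i + 1, j), 1 + go(i, j + 1), diff + go(i + 1, j + 1))
return go(0, 0)
assert edit_distance("kitten", "sitting") == 3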
| 350
|
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : str , *A : Dict , A : Optional[int]=None , A : Tuple=None , **A : Optional[int] ):
super().__init__(*A , **A )
__snake_case: List[Any] = eval_examples
__snake_case: str = post_process_function
def UpperCAmelCase__ ( self : List[Any] , A : Dict=None , A : int=None , A : List[Any]=None , A : str = "eval" ):
__snake_case: int = self.eval_dataset if eval_dataset is None else eval_dataset
__snake_case: Any = self.get_eval_dataloader(A )
__snake_case: Optional[Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case: Union[str, Any] = self.compute_metrics
__snake_case: List[str] = None
__snake_case: Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__snake_case: Tuple = time.time()
try:
__snake_case: Any = eval_loop(
A , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , )
finally:
__snake_case: Optional[int] = compute_metrics
__snake_case: Union[str, Any] = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
__snake_case: List[str] = self.post_process_function(A , A , output.predictions )
__snake_case: List[Any] = self.compute_metrics(A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__snake_case: str = metrics.pop(A )
metrics.update(output.metrics )
else:
__snake_case: List[Any] = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(A )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
__snake_case: str = self.callback_handler.on_evaluate(self.args , self.state , self.control , A )
return metrics
def UpperCAmelCase__ ( self : Optional[Any] , A : List[Any] , A : List[str] , A : str=None , A : str = "test" ):
__snake_case: Optional[Any] = self.get_test_dataloader(A )
# Temporarily disable metric computation, we will do it in the loop here.
__snake_case: Optional[int] = self.compute_metrics
__snake_case: List[Any] = None
__snake_case: str = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
__snake_case: Dict = time.time()
try:
__snake_case: str = eval_loop(
A , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=A , metric_key_prefix=A , )
finally:
__snake_case: List[Any] = compute_metrics
__snake_case: Dict = self.args.eval_batch_size * self.args.world_size
if f'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[f'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
A , A , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
__snake_case: Union[str, Any] = self.post_process_function(A , A , output.predictions , """predict""" )
__snake_case: str = self.compute_metrics(A )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'''{metric_key_prefix}_''' ):
__snake_case: List[str] = metrics.pop(A )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=A )
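# --- Hedged sketch of the throughput numbers speed_metrics adds above ---
# (hypothetical sample counts; samples/sec and steps/sec over the wall time):
import math
num_samples, total_batch_size, runtime_s = 1_000, 32, 2.0
throughput = {
"eval_samples_per_second": round(num_samples / runtime_s, 3),
"eval_steps_per_second": round(math.ceil(num_samples / total_batch_size) / runtime_s, 3),
}
assert throughput == {"eval_samples_per_second": 500.0, "eval_steps_per_second": 16.0}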
| 293
| 0
|
def A__ ( ) -> Any:
__snake_case: str = []
__snake_case: str = 1
while len(SCREAMING_SNAKE_CASE__) < 1e6:
constant.append(str(SCREAMING_SNAKE_CASE__))
i += 1
__snake_case: List[Any] = """""".join(SCREAMING_SNAKE_CASE__)
return (
int(constant[0])
* int(constant[9])
* int(constant[99])
* int(constant[999])
* int(constant[9999])
* int(constant[9_9999])
* int(constant[99_9999])
)
if __name__ == "__main__":
print(solution())
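# --- Verification sketch for the digit indexing above (Project Euler 40) ---
# The concatenated decimal "123456789101112..." gives d(100) = 5, and the
# product d(1) * d(10) * ... * d(1_000_000) is 210.
from math import prod
digits = "".join(str(i) for i in range(1, 200_000))  # well over 1e6 digits
assert digits[99] == "5"
assert prod(int(digits[10**k - 1]) for k in range(7)) == 210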
| 351
|
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : str = logging.get_logger(__name__)
__UpperCAmelCase : int = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """rwkv"""
lowerCAmelCase__ = {"""max_position_embeddings""": """context_length"""}
def __init__( self : Dict , A : List[Any]=50_277 , A : List[Any]=1_024 , A : Union[str, Any]=4_096 , A : Tuple=32 , A : List[Any]=None , A : Tuple=None , A : Tuple=1E-5 , A : int=0 , A : Optional[int]=0 , A : Dict=6 , A : Dict=False , A : int=True , **A : List[Any] , ):
__snake_case: Tuple = vocab_size
__snake_case: Any = context_length
__snake_case: Dict = hidden_size
__snake_case: Dict = num_hidden_layers
__snake_case: Union[str, Any] = attention_hidden_size if attention_hidden_size is not None else hidden_size
__snake_case: str = intermediate_size if intermediate_size is not None else 4 * hidden_size
__snake_case: Any = layer_norm_epsilon
__snake_case: int = rescale_every
__snake_case: str = use_cache
__snake_case: Dict = bos_token_id
__snake_case: Union[str, Any] = eos_token_id
super().__init__(
tie_word_embeddings=A , bos_token_id=A , eos_token_id=A , **A )
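# --- Sketch of the derived defaults in the config above ---
# When left as None, attention_hidden_size falls back to hidden_size and
# intermediate_size to 4 * hidden_size (default hidden_size is 4096):
hidden_size = 4096
attention_hidden_size = hidden_size      # default when None is passed
intermediate_size = 4 * hidden_size      # default when None is passed
assert (attention_hidden_size, intermediate_size) == (4096, 16384)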
| 293
| 0
|
"""simple docstring"""
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def A__ ( SCREAMING_SNAKE_CASE__) -> List[str]:
__snake_case: Optional[int] = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
def A__ ( SCREAMING_SNAKE_CASE__) -> Dict:
__snake_case: Tuple = emb.weight.shape
__snake_case: str = nn.Linear(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , bias=SCREAMING_SNAKE_CASE__)
__snake_case: Any = emb.weight.data
return lin_layer
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None) -> Optional[int]:
__snake_case: List[str] = {}
for old_key in state_dict.keys():
__snake_case: Optional[int] = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
__snake_case: List[str] = key.replace("""moe_layer.experts.0""" , F'''ffn.experts.expert_{expert_idx}''')
else:
__snake_case: int = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""")
if "gate" in key:
__snake_case: Any = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""")
if "fc2" and "experts" not in key:
__snake_case: Optional[int] = key.replace(""".fc2.""" , """.ffn.fc2.""")
if "fc1" and "experts" not in key:
__snake_case: List[Any] = key.replace(""".fc1.""" , """.ffn.fc1.""")
if ".encoder_attn." in key:
__snake_case: Optional[Any] = key.replace(""".encoder_attn.""" , """.cross_attention.""")
if "encoder_attn_layer_norm" in key:
__snake_case: Optional[Any] = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""")
if "final_layer_norm" in key:
__snake_case: str = key.replace("""final_layer_norm""" , """ff_layer_norm""")
__snake_case: Tuple = state_dict[old_key]
return new_dict
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = WEIGHTS_NAME) -> Optional[Any]:
__snake_case: str = []
__snake_case: Optional[int] = 0
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__)
for expert in range(SCREAMING_SNAKE_CASE__):
__snake_case: Optional[int] = switch_checkpoint_path + F'''-rank-{expert}.pt'''
if os.path.isfile(SCREAMING_SNAKE_CASE__):
__snake_case: Optional[Any] = torch.load(SCREAMING_SNAKE_CASE__)["""model"""]
remove_ignore_keys_(SCREAMING_SNAKE_CASE__)
__snake_case: Any = rename_fairseq_keys(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
__snake_case: int = os.path.join(
SCREAMING_SNAKE_CASE__ , weights_name.replace(""".bin""" , F'''-{len(SCREAMING_SNAKE_CASE__)+1:05d}-of-???.bin'''))
torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
sharded_state_dicts.append(expert_state.keys())
total_size += sum([value.numel() for key, value in expert_state.items()]) * dtype_byte_size(
expert_state[list(SCREAMING_SNAKE_CASE__)[0]].dtype)
# Add the last block
__snake_case: str = os.path.join(SCREAMING_SNAKE_CASE__ , weights_name.replace(""".bin""" , F'''-{len(SCREAMING_SNAKE_CASE__)+1:05d}-of-???.bin'''))
__snake_case: Optional[int] = torch.load(switch_checkpoint_path + """-shared.pt""")["""model"""]
remove_ignore_keys_(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = rename_fairseq_keys(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
__snake_case: List[str] = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys())
# If we only have the shared weights (dummy model/experts saved in the same file)
if len(SCREAMING_SNAKE_CASE__) == 1:
__snake_case: Tuple = os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# Otherwise, let's build the index
__snake_case: Any = {}
for idx, shard in enumerate(SCREAMING_SNAKE_CASE__):
__snake_case: Tuple = weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-{len(SCREAMING_SNAKE_CASE__):05d}.bin''')
__snake_case: Optional[Any] = os.path.join(SCREAMING_SNAKE_CASE__ , weights_name.replace(""".bin""" , F'''-{idx+1:05d}-of-???.bin'''))
os.rename(SCREAMING_SNAKE_CASE__ , os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__))
for key in shard:
__snake_case: List[str] = shard_file
# Add the metadata
__snake_case: Dict = {"""total_size""": total_size}
__snake_case: Optional[int] = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) , """w""" , encoding="""utf-8""") as f:
__snake_case: Dict = json.dumps(SCREAMING_SNAKE_CASE__ , indent=2 , sort_keys=SCREAMING_SNAKE_CASE__) + """\n"""
f.write(SCREAMING_SNAKE_CASE__)
return metadata, index
if __name__ == "__main__":
__UpperCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--nllb_moe_checkpoint_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000",
type=str,
required=False,
help="Path to a directory containing a folder per layer. Follows the original Google format.",
)
parser.add_argument("--dtype", default="float32", type=str, required=False, help="dtype of the saved model")
parser.add_argument(
"--pytorch_dump_folder_path",
default="/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b",
type=str,
required=False,
help="Path to the output pytorch model.",
)
__UpperCAmelCase : Optional[int] = parser.parse_args()
__UpperCAmelCase : Optional[Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
128,
args.dtype,
)
__UpperCAmelCase : Union[str, Any] = NllbMoeConfig.from_pretrained(
"facebook/nllb-200-3.3B", encoder_sparse_step=4, decoder_sparse_step=4, num_experts=128
)
config.save_pretrained(args.pytorch_dump_folder_path)
__UpperCAmelCase : Union[str, Any] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print("Done")
model.save_pretrained(args.pytorch_dump_folder_path)
| 352
|
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, MBartaaTokenizer, MBartaaTokenizerFast, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
)
from ...test_tokenization_common import TokenizerTesterMixin
__UpperCAmelCase : str = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
from transformers.models.mbart.modeling_mbart import shift_tokens_right
__UpperCAmelCase : Any = 250_004
__UpperCAmelCase : List[str] = 250_020
@require_sentencepiece
@require_tokenizers
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = MBartaaTokenizer
lowerCAmelCase__ = MBartaaTokenizerFast
lowerCAmelCase__ = True
lowerCAmelCase__ = True
def UpperCAmelCase__ ( self : Tuple ):
super().setUp()
# We have a SentencePiece fixture for testing
__snake_case: Optional[int] = MBartaaTokenizer(A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=A )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Any = """<s>"""
__snake_case: Tuple = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A ) , A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A ) , A )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Dict = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , """<s>""" )
self.assertEqual(vocab_keys[1] , """<pad>""" )
self.assertEqual(vocab_keys[-1] , """<mask>""" )
self.assertEqual(len(A ) , 1_054 )
def UpperCAmelCase__ ( self : Any ):
self.assertEqual(self.get_tokenizer().vocab_size , 1_054 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: Dict = MBartaaTokenizer(A , src_lang="""en_XX""" , tgt_lang="""ro_RO""" , keep_accents=A )
__snake_case: int = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(A , ["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__snake_case: Union[str, Any] = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """9""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """é""", """."""] , )
__snake_case: List[Any] = tokenizer.convert_tokens_to_ids(A )
self.assertListEqual(
A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
__snake_case: int = tokenizer.convert_ids_to_tokens(A )
self.assertListEqual(
A , [SPIECE_UNDERLINE + """I""", SPIECE_UNDERLINE + """was""", SPIECE_UNDERLINE + """b""", """or""", """n""", SPIECE_UNDERLINE + """in""", SPIECE_UNDERLINE + """""", """<unk>""", """2""", """0""", """0""", """0""", """,""", SPIECE_UNDERLINE + """and""", SPIECE_UNDERLINE + """this""", SPIECE_UNDERLINE + """is""", SPIECE_UNDERLINE + """f""", """al""", """s""", """<unk>""", """."""] , )
@slow
def UpperCAmelCase__ ( self : Optional[int] ):
# fmt: off
__snake_case: List[str] = {"""input_ids""": [[250_004, 11_062, 82_772, 7, 15, 82_772, 538, 51_529, 237, 17_198, 1_290, 206, 9, 215_175, 1_314, 136, 17_198, 1_290, 206, 9, 56_359, 42, 122_009, 9, 16_466, 16, 87_344, 4_537, 9, 4_717, 78_381, 6, 159_958, 7, 15, 24_480, 618, 4, 527, 22_693, 5_428, 4, 2_777, 24_480, 9_874, 4, 43_523, 594, 4, 803, 18_392, 33_189, 18, 4, 43_523, 24_447, 12_399, 100, 24_955, 83_658, 9_626, 144_057, 15, 839, 22_335, 16, 136, 24_955, 83_658, 83_479, 15, 39_102, 724, 16, 678, 645, 2_789, 1_328, 4_589, 42, 122_009, 115_774, 23, 805, 1_328, 46_876, 7, 136, 53_894, 1_940, 42_227, 41_159, 17_721, 823, 425, 4, 27_512, 98_722, 206, 136, 5_531, 4_970, 919, 17_336, 5, 2], [250_004, 20_080, 618, 83, 82_775, 47, 479, 9, 1_517, 73, 53_894, 333, 80_581, 110_117, 18_811, 5_256, 1_295, 51, 152_526, 297, 7_986, 390, 124_416, 538, 35_431, 214, 98, 15_044, 25_737, 136, 7_108, 43_701, 23, 756, 135_355, 7, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [250_004, 581, 63_773, 119_455, 6, 147_797, 88_203, 7, 645, 70, 21, 3_285, 10_269, 5, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A , model_name="""facebook/mbart-large-50""" , revision="""d3913889c59cd5c9e456b269c376325eabad57e2""" , )
def UpperCAmelCase__ ( self : Union[str, Any] ):
if not self.test_slow_tokenizer:
# as we don't have a slow version, we can't compare the outputs between slow and fast versions
return
self.tokenizers_list[0] = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-mbart50""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
__snake_case: Optional[int] = self.rust_tokenizer_class.from_pretrained(A , **A )
__snake_case: Union[str, Any] = self.tokenizer_class.from_pretrained(A , **A )
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Tuple = tokenizer_r.save_pretrained(A )
__snake_case: Optional[int] = tokenizer_p.save_pretrained(A )
# Checks it saves with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__snake_case: Dict = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: Tuple = tokenizer_r.from_pretrained(A )
__snake_case: Optional[Any] = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
# self.assertEqual(getattr(tokenizer_rp, key), getattr(tokenizer_pp, key))
# self.assertEqual(getattr(tokenizer_rp, key + "_id"), getattr(tokenizer_pp, key + "_id"))
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=True
__snake_case: Tuple = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: List[str] = tokenizer_p.save_pretrained(A )
# Checks it saves with the same files
self.assertSequenceEqual(A , A )
# Checks everything loads correctly in the same way
__snake_case: List[Any] = tokenizer_r.from_pretrained(A )
__snake_case: Dict = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
# Save tokenizer rust, legacy_format=False
__snake_case: List[str] = tempfile.mkdtemp()
__snake_case: Any = tokenizer_r.save_pretrained(A , legacy_format=A )
__snake_case: Dict = tokenizer_p.save_pretrained(A )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__snake_case: Any = tokenizer_r.from_pretrained(A )
__snake_case: Any = tokenizer_p.from_pretrained(A )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(A , A ) )
shutil.rmtree(A )
@require_torch
@require_sentencepiece
@require_tokenizers
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = """facebook/mbart-large-50-one-to-many-mmt"""
lowerCAmelCase__ = [
""" UN Chief Says There Is No Military Solution in Syria""",
""" Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for Syria is that \"there is no military solution\" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.""",
]
lowerCAmelCase__ = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
"""Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei"""
""" pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi că noi arme nu vor"""
""" face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.""",
]
lowerCAmelCase__ = [EN_CODE, 82_74, 12_78_73, 2_59_16, 7, 86_22, 20_71, 4_38, 6_74_85, 53, 18_78_95, 23, 5_17_12, 2]
@classmethod
def UpperCAmelCase__ ( cls : int ):
__snake_case: MBartaaTokenizer = MBartaaTokenizer.from_pretrained(
cls.checkpoint_name , src_lang="""en_XX""" , tgt_lang="""ro_RO""" )
__snake_case: str = 1
return cls
def UpperCAmelCase__ ( self : Any ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ar_AR"""] , 250_001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""en_EN"""] , 250_004 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ro_RO"""] , 250_020 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""mr_IN"""] , 250_038 )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[str] = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
self.assertIn(A , self.tokenizer.all_special_ids )
__snake_case: Dict = [RO_CODE, 884, 9_019, 96, 9, 916, 86_792, 36, 18_743, 15_596, 5, 2]
__snake_case: str = self.tokenizer.decode(A , skip_special_tokens=A )
__snake_case: Union[str, Any] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=A )
self.assertEqual(A , A )
self.assertNotIn(self.tokenizer.eos_token , A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[str] = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] , A )
__snake_case: Union[str, Any] = 10
__snake_case: List[Any] = self.tokenizer(A , max_length=A , truncation=A ).input_ids[0]
self.assertEqual(ids[0] , A )
self.assertEqual(ids[-1] , 2 )
self.assertEqual(len(A ) , A )
def UpperCAmelCase__ ( self : Tuple ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) , [250_053, 250_001] )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[Any] = tempfile.mkdtemp()
__snake_case: Any = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(A )
__snake_case: Union[str, Any] = MBartaaTokenizer.from_pretrained(A )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , A )
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: List[str] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=A , return_tensors="""pt""" )
__snake_case: List[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
assert batch.input_ids[1][0] == EN_CODE
assert batch.input_ids[1][-1] == 2
assert batch.labels[1][0] == RO_CODE
assert batch.labels[1][-1] == 2
assert batch.decoder_input_ids[1][:2].tolist() == [2, RO_CODE]
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: int = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=A , truncation=A , max_length=len(self.expected_src_tokens ) , return_tensors="""pt""" , )
__snake_case: Optional[Any] = shift_tokens_right(batch["""labels"""] , self.tokenizer.pad_token_id )
self.assertIsInstance(A , A )
self.assertEqual((2, 14) , batch.input_ids.shape )
self.assertEqual((2, 14) , batch.attention_mask.shape )
__snake_case: List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , A )
self.assertEqual(2 , batch.decoder_input_ids[0, 0] ) # decoder_start_token_id
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id] )
def UpperCAmelCase__ ( self : str ):
__snake_case: List[Any] = self.tokenizer(self.src_text , padding=A , truncation=A , max_length=3 , return_tensors="""pt""" )
__snake_case: Union[str, Any] = self.tokenizer(
text_target=self.tgt_text , padding=A , truncation=A , max_length=10 , return_tensors="""pt""" )
__snake_case: Dict = targets["""input_ids"""]
__snake_case: Any = shift_tokens_right(A , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 10 )
@require_torch
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = self.tokenizer._build_translation_inputs(
"""A test""" , return_tensors="""pt""" , src_lang="""en_XX""" , tgt_lang="""ar_AR""" )
self.assertEqual(
nested_simplify(A ) , {
# en_XX, A, test, EOS
"""input_ids""": [[250_004, 62, 3_034, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 250_001,
} , )
| 293
| 0
|
def A__ ( SCREAMING_SNAKE_CASE__) -> int:
__snake_case: Optional[Any] = [1]
# One multiplier index per prime factor (2, 3, 5), each pointing at the next ugly number to multiply.
__snake_case , __snake_case , __snake_case: str = 0, 0, 0
__snake_case: List[Any] = ugly_nums[ia] * 2
__snake_case: List[str] = ugly_nums[ib] * 3
__snake_case: Tuple = ugly_nums[ic] * 5
for _ in range(1 , SCREAMING_SNAKE_CASE__):
__snake_case: int = min(next_a , next_b , next_c)
ugly_nums.append(next_num)
if next_num == next_a:
ia += 1
__snake_case: Any = ugly_nums[ia] * 2
if next_num == next_b:
ib += 1
__snake_case: List[Any] = ugly_nums[ib] * 3
if next_num == next_c:
ic += 1
__snake_case: Union[str, Any] = ugly_nums[ic] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'{ugly_numbers(200) = }')
| 353
|
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
__UpperCAmelCase : str = logging.get_logger(__name__)
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : Any , A : int , A : int , A : float , **A : Optional[int] ):
__snake_case: List[str] = feature_size
__snake_case: Optional[int] = sampling_rate
__snake_case: Any = padding_value
__snake_case: Dict = kwargs.pop("""padding_side""" , """right""" )
__snake_case: Union[str, Any] = kwargs.pop("""return_attention_mask""" , A )
super().__init__(**A )
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[
BatchFeature,
List[BatchFeature],
Dict[str, BatchFeature],
Dict[str, List[BatchFeature]],
List[Dict[str, BatchFeature]],
] , A : Union[bool, str, PaddingStrategy] = True , A : Optional[int] = None , A : bool = False , A : Optional[int] = None , A : Optional[bool] = None , A : Optional[Union[str, TensorType]] = None , ):
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(A , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
__snake_case: Optional[int] = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has to be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"""You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"""
f''' to this method that includes {self.model_input_names[0]}, but you provided'''
f''' {list(processed_features.keys() )}''' )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
__snake_case: Any = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(A ) == 0:
if return_attention_mask:
__snake_case: Union[str, Any] = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
__snake_case: int = required_input[0]
if isinstance(A , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
__snake_case: Optional[int] = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(A ):
__snake_case: Optional[int] = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(A ):
__snake_case: str = """tf"""
elif is_torch_tensor(A ):
__snake_case: str = """pt"""
elif isinstance(A , (int, float, list, tuple, np.ndarray) ):
__snake_case: List[str] = """np"""
else:
raise ValueError(
f'''type of {first_element} unknown: {type(A )}. '''
"""Should be one of a python, numpy, pytorch or tensorflow object.""" )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
__snake_case: List[Any] = to_numpy(A )
else:
__snake_case: Union[str, Any] = [to_numpy(A ) for v in value]
# Convert padding_strategy in PaddingStrategy
__snake_case: Union[str, Any] = self._get_padding_strategies(padding=A , max_length=A )
__snake_case: Any = processed_features[self.model_input_names[0]]
__snake_case: int = len(A )
if not all(len(A ) == batch_size for v in processed_features.values() ):
raise ValueError("""Some items in the output dictionary have a different batch size than others.""" )
__snake_case: Union[str, Any] = []
for i in range(A ):
__snake_case: List[Any] = {k: v[i] for k, v in processed_features.items()}
# truncation
__snake_case: Tuple = self._truncate(
A , max_length=A , pad_to_multiple_of=A , truncation=A , )
truncated_inputs.append(A )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
__snake_case: Optional[Any] = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
__snake_case: List[str] = PaddingStrategy.MAX_LENGTH
__snake_case: List[Any] = {}
for i in range(A ):
# padding
__snake_case: Any = self._pad(
truncated_inputs[i] , max_length=A , padding_strategy=A , pad_to_multiple_of=A , return_attention_mask=A , )
for key, value in outputs.items():
if key not in batch_outputs:
__snake_case: Optional[Any] = []
if value.dtype is np.dtype(np.float64 ):
__snake_case: str = value.astype(np.float32 )
batch_outputs[key].append(A )
return BatchFeature(A , tensor_type=A )
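# _pad pads one example (and its attention mask) up to max_length on the configured padding side.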
def UpperCAmelCase__ ( self : int , A : Union[Dict[str, np.ndarray], BatchFeature] , A : Optional[int] = None , A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , A : Optional[int] = None , A : Optional[bool] = None , ):
__snake_case: List[Any] = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
__snake_case: List[str] = len(A )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Dict = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
__snake_case: List[str] = np.ones(len(A ) , dtype=np.int32 )
if needs_to_be_padded:
__snake_case: Any = max_length - len(A )
if self.padding_side == "right":
if return_attention_mask:
__snake_case: Optional[int] = np.pad(
processed_features["""attention_mask"""] , (0, difference) )
__snake_case: Any = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
__snake_case: Union[str, Any] = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
__snake_case: Dict = np.pad(
processed_features["""attention_mask"""] , (difference, 0) )
__snake_case: Union[str, Any] = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
__snake_case: str = np.pad(
A , A , """constant""" , constant_values=self.padding_value )
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return processed_features
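# _truncate trims one example (and its attention mask) down to max_length when truncation is enabled.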
def UpperCAmelCase__ ( self : Optional[Any] , A : Union[Dict[str, np.ndarray], BatchFeature] , A : Optional[int] = None , A : Optional[int] = None , A : Optional[bool] = None , ):
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("""When setting ``truncation=True``, make sure that ``max_length`` is defined.""" )
__snake_case: List[str] = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
__snake_case: List[Any] = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
__snake_case: Tuple = len(A ) > max_length
if needs_to_be_truncated:
__snake_case: List[Any] = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
__snake_case: int = processed_features["""attention_mask"""][:max_length]
return processed_features
def UpperCAmelCase__ ( self : int , A : int=False , A : int=None ):
# Get padding strategy
if padding is not False:
if padding is True:
__snake_case: Optional[int] = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(A , A ):
__snake_case: Optional[int] = PaddingStrategy(A )
elif isinstance(A , A ):
__snake_case: Any = padding
else:
__snake_case: Any = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
f'''When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined''' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"""Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"""
""" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.""" )
return padding_strategy
| 293
| 0
|
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 16_00, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 16_00, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : List[str] ):
if self.framework == "pytorch":
subprocess.run(
f'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="""utf-8""" , check=A , )
assert hasattr(self , """env""" )
def UpperCAmelCase__ ( self : Union[str, Any] , A : Any ):
# configuration for running training on smdistributed Model Parallel
__snake_case: Union[str, Any] = {
"""enabled""": True,
"""processes_per_host""": 8,
}
__snake_case: List[Any] = {
"""enabled""": True,
"""parameters""": {
"""microbatches""": 4,
"""placement_strategy""": """spread""",
"""pipeline""": """interleaved""",
"""optimize""": """speed""",
"""partitions""": 4,
"""ddp""": True,
},
}
__snake_case: List[Any] = {"""smdistributed""": {"""modelparallel""": smp_options}, """mpi""": mpi_options}
__snake_case: Optional[int] = """trainer""" if self.script == """run_glue.py""" else """smtrainer"""
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=f'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=A , instance_type=self.instance_type , debugger_hook_config=A , hyperparameters={
**self.env.hyperparameters,
"""model_name_or_path""": self.model_name_or_path,
"""max_steps""": 500,
} , metric_definitions=self.env.metric_definitions , distribution=A , py_version="""py36""" , )
def UpperCAmelCase__ ( self : List[Any] , A : Union[str, Any] ):
TrainingJobAnalytics(A ).export_csv(f'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def UpperCAmelCase__ ( self : List[str] , A : List[str] ):
# create estimator
__snake_case: List[str] = self.create_estimator(A )
# run training
estimator.fit()
# result dataframe
__snake_case: List[Any] = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__snake_case: Dict = list(result_metrics_df[result_metrics_df.metric_name == """eval_accuracy"""]["""value"""] )
__snake_case: Optional[int] = list(result_metrics_df[result_metrics_df.metric_name == """eval_loss"""]["""value"""] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__snake_case: List[Any] = (
Session().describe_training_job(estimator.latest_training_job.name ).get("""TrainingTimeInSeconds""" , 999_999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["""eval_accuracy"""] for t in eval_accuracy )
assert all(t <= self.results["""eval_loss"""] for t in eval_loss )
# dump tests result into json file to share in PR
with open(f'''{estimator.latest_training_job.name}.json''' , """w""" ) as outfile:
json.dump({"""train_time""": train_runtime, """eval_accuracy""": eval_accuracy, """eval_loss""": eval_loss} , A )
| 354
|
from __future__ import annotations
import numpy as np
def A__ ( SCREAMING_SNAKE_CASE__) -> List[str]:
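# Element-wise ReLU: negative entries are clamped to zero, positive entries pass through.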
return np.maximum(0 , SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 293
| 0
|
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
__UpperCAmelCase : Tuple = "scheduler_config.json"
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = 1
lowerCAmelCase__ = 2
lowerCAmelCase__ = 3
lowerCAmelCase__ = 4
lowerCAmelCase__ = 5
lowerCAmelCase__ = 6
lowerCAmelCase__ = 7
lowerCAmelCase__ = 8
lowerCAmelCase__ = 9
lowerCAmelCase__ = 10
lowerCAmelCase__ = 11
lowerCAmelCase__ = 12
lowerCAmelCase__ = 13
lowerCAmelCase__ = 14
@dataclass
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = 42
class __snake_case :
'''simple docstring'''
lowerCAmelCase__ = SCHEDULER_CONFIG_NAME
lowerCAmelCase__ = []
lowerCAmelCase__ = True
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] , A : Dict[str, Any] = None , A : Optional[str] = None , A : Dict=False , **A : Any , ):
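# Load the scheduler_config.json from the hub or a local folder, then build the scheduler via from_config.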
__snake_case: Any = cls.load_config(
pretrained_model_name_or_path=A , subfolder=A , return_unused_kwargs=A , return_commit_hash=A , **A , )
return cls.from_config(A , return_unused_kwargs=A , **A )
def UpperCAmelCase__ ( self : List[Any] , A : Union[str, os.PathLike] , A : bool = False , **A : List[Any] ):
self.save_config(save_directory=A , push_to_hub=A , **A )
@property
def UpperCAmelCase__ ( self : List[Any] ):
return self._get_compatibles()
@classmethod
def UpperCAmelCase__ ( cls : str ):
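# Resolve the class names listed in _compatibles (plus this class) into the actual classes
# exported by the package root module.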
__snake_case: Tuple = list(set([cls.__name__] + cls._compatibles ) )
__snake_case: List[str] = importlib.import_module(__name__.split(""".""" )[0] )
__snake_case: Optional[int] = [
getattr(A , A ) for c in compatible_classes_str if hasattr(A , A )
]
return compatible_classes
| 355
|
import copy
import tempfile
import unittest
from huggingface_hub import HfFolder, delete_repo
from parameterized import parameterized
from requests.exceptions import HTTPError
from transformers import AutoConfig, GenerationConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@parameterized.expand([(None,), ("""foo.json""",)] )
def UpperCAmelCase__ ( self : List[str] , A : Optional[Any] ):
__snake_case: Any = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A , config_name=A )
__snake_case: Optional[int] = GenerationConfig.from_pretrained(A , config_name=A )
# Checks parameters that were specified
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.temperature , 0.7 )
self.assertEqual(loaded_config.length_penalty , 1.0 )
self.assertEqual(loaded_config.bad_words_ids , [[1, 2, 3], [4, 5]] )
# Checks parameters that were not specified (defaults)
self.assertEqual(loaded_config.top_k , 50 )
self.assertEqual(loaded_config.max_length , 20 )
self.assertEqual(loaded_config.max_time , A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: str = AutoConfig.from_pretrained("""gpt2""" )
__snake_case: Any = GenerationConfig.from_model_config(A )
__snake_case: str = GenerationConfig()
# The generation config has loaded a few non-default parameters from the model config
self.assertNotEqual(A , A )
# One of those parameters is eos_token_id -- check if it matches
self.assertNotEqual(generation_config_from_model.eos_token_id , default_generation_config.eos_token_id )
self.assertEqual(generation_config_from_model.eos_token_id , model_config.eos_token_id )
def UpperCAmelCase__ ( self : str ):
__snake_case: List[str] = GenerationConfig()
__snake_case: Tuple = {
"""max_new_tokens""": 1_024,
"""foo""": """bar""",
}
__snake_case: List[str] = copy.deepcopy(A )
__snake_case: Optional[int] = generation_config.update(**A )
# update_kwargs was not modified (no side effects)
self.assertEqual(A , A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(generation_config.max_new_tokens , 1_024 )
# `.update()` returns a dictionary of unused kwargs
self.assertEqual(A , {"""foo""": """bar"""} )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: List[str] = GenerationConfig()
__snake_case: Optional[int] = """bar"""
with tempfile.TemporaryDirectory("""test-generation-config""" ) as tmp_dir:
generation_config.save_pretrained(A )
__snake_case: Any = GenerationConfig.from_pretrained(A )
# update_kwargs was used to update the config on valid attributes
self.assertEqual(new_config.foo , """bar""" )
__snake_case: int = GenerationConfig.from_model_config(A )
assert not hasattr(A , """foo""" ) # no new kwargs should be initialized if from config
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Dict = GenerationConfig()
self.assertEqual(default_config.temperature , 1.0 )
self.assertEqual(default_config.do_sample , A )
self.assertEqual(default_config.num_beams , 1 )
__snake_case: Union[str, Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , bad_words_ids=[[1, 2, 3], [4, 5]] , )
self.assertEqual(config.temperature , 0.7 )
self.assertEqual(config.do_sample , A )
self.assertEqual(config.num_beams , 1 )
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(A )
__snake_case: Tuple = GenerationConfig.from_pretrained(A , temperature=1.0 )
self.assertEqual(loaded_config.temperature , 1.0 )
self.assertEqual(loaded_config.do_sample , A )
self.assertEqual(loaded_config.num_beams , 1 ) # default value
@is_staging_test
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def UpperCAmelCase__ ( cls : List[str] ):
__snake_case: Optional[int] = TOKEN
HfFolder.save_token(A )
@classmethod
def UpperCAmelCase__ ( cls : List[Any] ):
try:
delete_repo(token=cls._token , repo_id="""test-generation-config""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-generation-config-org""" )
except HTTPError:
pass
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Optional[int] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""test-generation-config""" , use_auth_token=self._token )
__snake_case: str = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-generation-config""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""test-generation-config""" , push_to_hub=A , use_auth_token=self._token )
__snake_case: Optional[Any] = GenerationConfig.from_pretrained(f'''{USER}/test-generation-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = GenerationConfig(
do_sample=A , temperature=0.7 , length_penalty=1.0 , )
config.push_to_hub("""valid_org/test-generation-config-org""" , use_auth_token=self._token )
__snake_case: int = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-generation-config-org""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
A , repo_id="""valid_org/test-generation-config-org""" , push_to_hub=A , use_auth_token=self._token )
__snake_case: Optional[int] = GenerationConfig.from_pretrained("""valid_org/test-generation-config-org""" )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(A , getattr(A , A ) )
| 293
| 0
|
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import (
BaseOutput,
OptionalDependencyNotAvailable,
is_flax_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_onnx_available,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
@dataclass
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_cycle_diffusion import CycleDiffusionPipeline
from .pipeline_stable_diffusion import StableDiffusionPipeline
from .pipeline_stable_diffusion_attend_and_excite import StableDiffusionAttendAndExcitePipeline
from .pipeline_stable_diffusion_imgaimg import StableDiffusionImgaImgPipeline
from .pipeline_stable_diffusion_inpaint import StableDiffusionInpaintPipeline
from .pipeline_stable_diffusion_inpaint_legacy import StableDiffusionInpaintPipelineLegacy
from .pipeline_stable_diffusion_instruct_pixapix import StableDiffusionInstructPixaPixPipeline
from .pipeline_stable_diffusion_latent_upscale import StableDiffusionLatentUpscalePipeline
from .pipeline_stable_diffusion_ldmad import StableDiffusionLDMaDPipeline
from .pipeline_stable_diffusion_model_editing import StableDiffusionModelEditingPipeline
from .pipeline_stable_diffusion_panorama import StableDiffusionPanoramaPipeline
from .pipeline_stable_diffusion_paradigms import StableDiffusionParadigmsPipeline
from .pipeline_stable_diffusion_sag import StableDiffusionSAGPipeline
from .pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from .pipeline_stable_unclip import StableUnCLIPPipeline
from .pipeline_stable_unclip_imgaimg import StableUnCLIPImgaImgPipeline
from .safety_checker import StableDiffusionSafetyChecker
from .stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import StableDiffusionImageVariationPipeline
else:
from .pipeline_stable_diffusion_image_variation import StableDiffusionImageVariationPipeline
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.26.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionPixaPixZeroPipeline,
)
else:
from .pipeline_stable_diffusion_depthaimg import StableDiffusionDepthaImgPipeline
from .pipeline_stable_diffusion_diffedit import StableDiffusionDiffEditPipeline
from .pipeline_stable_diffusion_pixapix_zero import StableDiffusionPixaPixZeroPipeline
try:
if not (
is_torch_available()
and is_transformers_available()
and is_k_diffusion_available()
and is_k_diffusion_version(">=", "0.0.12")
):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipeline_stable_diffusion_k_diffusion import StableDiffusionKDiffusionPipeline
try:
if not (is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_onnx_objects import * # noqa F403
else:
from .pipeline_onnx_stable_diffusion import OnnxStableDiffusionPipeline, StableDiffusionOnnxPipeline
from .pipeline_onnx_stable_diffusion_imgaimg import OnnxStableDiffusionImgaImgPipeline
from .pipeline_onnx_stable_diffusion_inpaint import OnnxStableDiffusionInpaintPipeline
from .pipeline_onnx_stable_diffusion_inpaint_legacy import OnnxStableDiffusionInpaintPipelineLegacy
from .pipeline_onnx_stable_diffusion_upscale import OnnxStableDiffusionUpscalePipeline
if is_transformers_available() and is_flax_available():
import flax
@flax.struct.dataclass
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = 42
lowerCAmelCase__ = 42
from ...schedulers.scheduling_pndm_flax import PNDMSchedulerState
from .pipeline_flax_stable_diffusion import FlaxStableDiffusionPipeline
from .pipeline_flax_stable_diffusion_imgaimg import FlaxStableDiffusionImgaImgPipeline
from .pipeline_flax_stable_diffusion_inpaint import FlaxStableDiffusionInpaintPipeline
from .safety_checker_flax import FlaxStableDiffusionSafetyChecker
| 356
|
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
__UpperCAmelCase : Tuple = {
"distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
"roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
"bert": (BertConfig, BertForMaskedLM, BertTokenizer),
"gpt2": (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
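# Each entry maps a model type to its (config class, LM head class, tokenizer class) triple.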
def A__ ( SCREAMING_SNAKE_CASE__) -> Union[str, Any]:
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts)
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config)
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights)
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> str:
if args.student_type == "roberta":
__snake_case: Optional[Any] = False
elif args.student_type == "gpt2":
__snake_case: str = False
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[str]:
if args.student_type == "roberta":
__snake_case: Optional[int] = False
def A__ ( ) -> Tuple:
__snake_case: Optional[int] = argparse.ArgumentParser(description="""Training""")
parser.add_argument("""--force""" , action="""store_true""" , help="""Overwrite dump_path if it already exists.""")
parser.add_argument(
"""--dump_path""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The output directory (log, checkpoints, parameters, etc.)""")
parser.add_argument(
"""--data_file""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The binarized file (tokenized + tokens_to_ids) and grouped by sequence.""" , )
parser.add_argument(
"""--student_type""" , type=SCREAMING_SNAKE_CASE__ , choices=["""distilbert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""The student type (DistilBERT, RoBERTa).""" , )
parser.add_argument("""--student_config""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""Path to the student configuration.""")
parser.add_argument(
"""--student_pretrained_weights""" , default=SCREAMING_SNAKE_CASE__ , type=SCREAMING_SNAKE_CASE__ , help="""Load student initialization checkpoint.""")
parser.add_argument(
"""--teacher_type""" , choices=["""bert""", """roberta""", """gpt2"""] , required=SCREAMING_SNAKE_CASE__ , help="""Teacher type (BERT, RoBERTa).""")
parser.add_argument("""--teacher_name""" , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help="""The teacher model.""")
parser.add_argument("""--temperature""" , default=2.0 , type=SCREAMING_SNAKE_CASE__ , help="""Temperature for the softmax temperature.""")
parser.add_argument(
"""--alpha_ce""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the distillation loss. Must be >=0.""")
parser.add_argument(
"""--alpha_mlm""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.""" , )
parser.add_argument("""--alpha_clm""" , default=0.5 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight for the CLM loss. Must be >=0.""")
parser.add_argument("""--alpha_mse""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the MSE loss. Must be >=0.""")
parser.add_argument(
"""--alpha_cos""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Linear weight of the cosine embedding loss. Must be >=0.""")
parser.add_argument(
"""--mlm""" , action="""store_true""" , help="""The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.""")
parser.add_argument(
"""--mlm_mask_prop""" , default=0.15 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens for which we need to make a prediction.""" , )
parser.add_argument("""--word_mask""" , default=0.8 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to mask out.""")
parser.add_argument("""--word_keep""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to keep.""")
parser.add_argument("""--word_rand""" , default=0.1 , type=SCREAMING_SNAKE_CASE__ , help="""Proportion of tokens to randomly replace.""")
parser.add_argument(
"""--mlm_smoothing""" , default=0.7 , type=SCREAMING_SNAKE_CASE__ , help="""Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).""" , )
parser.add_argument("""--token_counts""" , type=SCREAMING_SNAKE_CASE__ , help="""The token counts in the data_file for MLM.""")
parser.add_argument(
"""--restrict_ce_to_mask""" , action="""store_true""" , help="""If true, compute the distillation loss only the [MLM] prediction distribution.""" , )
parser.add_argument(
"""--freeze_pos_embs""" , action="""store_true""" , help="""Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.""" , )
parser.add_argument(
"""--freeze_token_type_embds""" , action="""store_true""" , help="""Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.""" , )
parser.add_argument("""--n_epoch""" , type=SCREAMING_SNAKE_CASE__ , default=3 , help="""Number of pass on the whole dataset.""")
parser.add_argument("""--batch_size""" , type=SCREAMING_SNAKE_CASE__ , default=5 , help="""Batch size (for each process).""")
parser.add_argument(
"""--group_by_size""" , action="""store_false""" , help="""If true, group sequences that have similar length into the same batch. Default is true.""" , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=SCREAMING_SNAKE_CASE__ , default=50 , help="""Gradient accumulation for larger training batches.""" , )
parser.add_argument("""--warmup_prop""" , default=0.05 , type=SCREAMING_SNAKE_CASE__ , help="""Linear warmup proportion.""")
parser.add_argument("""--weight_decay""" , default=0.0 , type=SCREAMING_SNAKE_CASE__ , help="""Weight decay if we apply some.""")
parser.add_argument("""--learning_rate""" , default=5e-4 , type=SCREAMING_SNAKE_CASE__ , help="""The initial learning rate for Adam.""")
parser.add_argument("""--adam_epsilon""" , default=1e-6 , type=SCREAMING_SNAKE_CASE__ , help="""Epsilon for Adam optimizer.""")
parser.add_argument("""--max_grad_norm""" , default=5.0 , type=SCREAMING_SNAKE_CASE__ , help="""Max gradient norm.""")
parser.add_argument("""--initializer_range""" , default=0.02 , type=SCREAMING_SNAKE_CASE__ , help="""Random initialization range.""")
parser.add_argument(
"""--fp16""" , action="""store_true""" , help="""Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit""" , )
parser.add_argument(
"""--fp16_opt_level""" , type=SCREAMING_SNAKE_CASE__ , default="""O1""" , help=(
"""For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."""
"""See details at https://nvidia.github.io/apex/amp.html"""
) , )
parser.add_argument("""--n_gpu""" , type=SCREAMING_SNAKE_CASE__ , default=1 , help="""Number of GPUs in the node.""")
parser.add_argument("""--local_rank""" , type=SCREAMING_SNAKE_CASE__ , default=-1 , help="""Distributed training - Local rank""")
parser.add_argument("""--seed""" , type=SCREAMING_SNAKE_CASE__ , default=56 , help="""Random seed""")
parser.add_argument("""--log_interval""" , type=SCREAMING_SNAKE_CASE__ , default=500 , help="""Tensorboard logging interval.""")
parser.add_argument("""--checkpoint_interval""" , type=SCREAMING_SNAKE_CASE__ , default=4000 , help="""Checkpoint interval.""")
__snake_case: List[Any] = parser.parse_args()
sanity_checks(SCREAMING_SNAKE_CASE__)
# ARGS #
init_gpu_params(SCREAMING_SNAKE_CASE__)
set_seed(SCREAMING_SNAKE_CASE__)
if args.is_master:
if os.path.exists(args.dump_path):
if not args.force:
raise ValueError(
F'''Serialization dir {args.dump_path} already exists, but you have not specified whether to overwrite'''
""" it. Use `--force` if you want to overwrite it.""")
else:
shutil.rmtree(args.dump_path)
if not os.path.exists(args.dump_path):
os.makedirs(args.dump_path)
logger.info(F'''Experiment will be dumped and logged in {args.dump_path}''')
# SAVE PARAMS #
logger.info(F'''Param: {args}''')
with open(os.path.join(args.dump_path , """parameters.json""") , """w""") as f:
json.dump(vars(SCREAMING_SNAKE_CASE__) , SCREAMING_SNAKE_CASE__ , indent=4)
git_log(args.dump_path)
__snake_case , __snake_case , __snake_case: str = MODEL_CLASSES[args.student_type]
__snake_case , __snake_case , __snake_case: Union[str, Any] = MODEL_CLASSES[args.teacher_type]
# TOKENIZER #
__snake_case: Tuple = teacher_tokenizer_class.from_pretrained(args.teacher_name)
__snake_case: str = {}
for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
__snake_case: List[str] = tokenizer.all_special_tokens.index(SCREAMING_SNAKE_CASE__)
__snake_case: Optional[Any] = tokenizer.all_special_ids[idx]
logger.info(F'''Special tokens {special_tok_ids}''')
__snake_case: Optional[Any] = special_tok_ids
__snake_case: List[Any] = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file , """rb""") as fp:
__snake_case: int = pickle.load(SCREAMING_SNAKE_CASE__)
if args.mlm:
logger.info(F'''Loading token counts from {args.token_counts} (already pre-computed)''')
with open(args.token_counts , """rb""") as fp:
__snake_case: List[str] = pickle.load(SCREAMING_SNAKE_CASE__)
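# Turn raw token counts into MLM sampling weights: the negative smoothing exponent boosts rare tokens.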
__snake_case: Dict = np.maximum(SCREAMING_SNAKE_CASE__ , 1) ** -args.mlm_smoothing
for idx in special_tok_ids.values():
__snake_case: Union[str, Any] = 0.0 # do not predict special tokens
__snake_case: Any = torch.from_numpy(SCREAMING_SNAKE_CASE__)
else:
__snake_case: Any = None
__snake_case: Union[str, Any] = LmSeqsDataset(params=SCREAMING_SNAKE_CASE__ , data=SCREAMING_SNAKE_CASE__)
logger.info("""Data loader created.""")
# STUDENT #
logger.info(F'''Loading student config from {args.student_config}''')
__snake_case: Tuple = student_config_class.from_pretrained(args.student_config)
__snake_case: List[str] = True
if args.student_pretrained_weights is not None:
logger.info(F'''Loading pretrained weights from {args.student_pretrained_weights}''')
__snake_case: Optional[int] = student_model_class.from_pretrained(args.student_pretrained_weights , config=SCREAMING_SNAKE_CASE__)
else:
__snake_case: Union[str, Any] = student_model_class(SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
student.to(F'''cuda:{args.local_rank}''')
logger.info("""Student loaded.""")
# TEACHER #
__snake_case: Optional[int] = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=SCREAMING_SNAKE_CASE__)
if args.n_gpu > 0:
teacher.to(F'''cuda:{args.local_rank}''')
logger.info(F'''Teacher loaded from {args.teacher_name}.''')
# FREEZING #
if args.freeze_pos_embs:
freeze_pos_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
if args.freeze_token_type_embds:
freeze_token_type_embeddings(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
__snake_case: List[str] = Distiller(
params=SCREAMING_SNAKE_CASE__ , dataset=SCREAMING_SNAKE_CASE__ , token_probs=SCREAMING_SNAKE_CASE__ , student=SCREAMING_SNAKE_CASE__ , teacher=SCREAMING_SNAKE_CASE__)
distiller.train()
logger.info("""Let's go get some drinks.""")
if __name__ == "__main__":
main()
| 293
| 0
|
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __snake_case :
'''simple docstring'''
def __init__( self : str , A : List[Any] , A : str=13 , A : List[str]=7 , A : Dict=True , A : Dict=True , A : Dict=True , A : List[str]=True , A : Union[str, Any]=99 , A : int=24 , A : Any=2 , A : int=6 , A : List[str]=37 , A : Any="gelu" , A : str=0.1 , A : Dict=0.1 , A : Dict=512 , A : Dict=16 , A : Optional[int]=2 , A : List[Any]=0.02 , A : Optional[int]=3 , A : Tuple=None , A : List[str]=1_000 , ):
__snake_case: Optional[Any] = parent
__snake_case: int = batch_size
__snake_case: Optional[int] = seq_length
__snake_case: Tuple = is_training
__snake_case: str = use_input_mask
__snake_case: List[Any] = use_token_type_ids
__snake_case: Dict = use_labels
__snake_case: Optional[int] = vocab_size
__snake_case: Union[str, Any] = hidden_size
__snake_case: int = num_hidden_layers
__snake_case: Any = num_attention_heads
__snake_case: Dict = intermediate_size
__snake_case: Union[str, Any] = hidden_act
__snake_case: Any = hidden_dropout_prob
__snake_case: Dict = attention_probs_dropout_prob
__snake_case: List[Any] = max_position_embeddings
__snake_case: Optional[Any] = type_vocab_size
__snake_case: Any = type_sequence_label_size
__snake_case: List[Any] = initializer_range
__snake_case: str = num_labels
__snake_case: Any = scope
__snake_case: str = range_bbox
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__snake_case: int = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
__snake_case: Any = bbox[i, j, 3]
__snake_case: Dict = bbox[i, j, 1]
__snake_case: Union[str, Any] = t
if bbox[i, j, 2] < bbox[i, j, 0]:
__snake_case: Optional[Any] = bbox[i, j, 2]
__snake_case: Dict = bbox[i, j, 0]
__snake_case: str = t
__snake_case: Optional[int] = None
if self.use_input_mask:
__snake_case: Tuple = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__snake_case: Optional[Any] = None
if self.use_token_type_ids:
__snake_case: Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__snake_case: Dict = None
__snake_case: Optional[int] = None
if self.use_labels:
__snake_case: Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__snake_case: List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__snake_case: int = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def UpperCAmelCase__ ( self : Tuple ):
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : int , A : Dict , A : Dict , A : List[Any] , A : Optional[Any] , A : Optional[Any] , A : List[Any] , A : List[Any] , ):
__snake_case: Optional[int] = LiltModel(config=A )
model.to(A )
model.eval()
__snake_case: int = model(A , bbox=A , attention_mask=A , token_type_ids=A )
__snake_case: List[str] = model(A , bbox=A , token_type_ids=A )
__snake_case: Dict = model(A , bbox=A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def UpperCAmelCase__ ( self : int , A : Optional[Any] , A : str , A : List[Any] , A : Optional[int] , A : int , A : str , A : Any , ):
__snake_case: Union[str, Any] = self.num_labels
__snake_case: int = LiltForTokenClassification(config=A )
model.to(A )
model.eval()
__snake_case: Optional[Any] = model(
A , bbox=A , attention_mask=A , token_type_ids=A , labels=A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : Union[str, Any] , A : Dict , A : Tuple , A : int , A : int , A : Optional[int] , A : str , A : str , ):
__snake_case: Optional[int] = LiltForQuestionAnswering(config=A )
model.to(A )
model.eval()
__snake_case: Dict = model(
A , bbox=A , attention_mask=A , token_type_ids=A , start_positions=A , end_positions=A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: Union[str, Any] = self.prepare_config_and_inputs()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case: str = config_and_inputs
__snake_case: Optional[int] = {
"""input_ids""": input_ids,
"""bbox""": bbox,
"""token_type_ids""": token_type_ids,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class __snake_case ( __lowerCamelCase , __lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
"""feature-extraction""": LiltModel,
"""question-answering""": LiltForQuestionAnswering,
"""text-classification""": LiltForSequenceClassification,
"""token-classification""": LiltForTokenClassification,
"""zero-shot""": LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : Dict , A : Any , A : str , A : Any , A : Optional[int] , A : Optional[Any] ):
return True
def UpperCAmelCase__ ( self : Dict ):
__snake_case: List[str] = LiltModelTester(self )
__snake_case: str = ConfigTester(self , config_class=A , hidden_size=37 )
def UpperCAmelCase__ ( self : str ):
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: str = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__snake_case: Tuple = type
self.model_tester.create_and_check_model(*A )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*A )
@slow
def UpperCAmelCase__ ( self : List[Any] ):
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__snake_case: Optional[int] = LiltModel.from_pretrained(A )
self.assertIsNotNone(A )
@require_torch
@slow
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Tuple = LiltModel.from_pretrained("""SCUT-DLVCLab/lilt-roberta-en-base""" ).to(A )
__snake_case: int = torch.tensor([[1, 2]] , device=A )
__snake_case: Optional[int] = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=A )
# forward pass
with torch.no_grad():
__snake_case: Union[str, Any] = model(input_ids=A , bbox=A )
__snake_case: Optional[int] = torch.Size([1, 2, 768] )
__snake_case: Optional[int] = torch.tensor(
[[-0.0653, 0.0950, -0.0061], [-0.0545, 0.0926, -0.0324]] , device=A , )
self.assertEqual(outputs.last_hidden_state.shape , A )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , A , atol=1E-3 ) )
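# A standalone toy illustration (invented box values) of the bbox "legality" fix-up
# performed in the model tester's input-preparation method above: coordinates are
# swapped so that x0 <= x1 and y0 <= y1 for every (x0, y0, x1, y1) box.
box = [9, 7, 3, 2]  # x0 > x1 and y0 > y1, so the box is "illegal"
if box[3] < box[1]:
    box[1], box[3] = box[3], box[1]
if box[2] < box[0]:
    box[0], box[2] = box[2], box[0]
assert box == [3, 2, 9, 7]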
import warnings
from diffusers import StableDiffusionImg2ImgPipeline # noqa F401
warnings.warn(
"The `image_to_image.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionImg2ImgPipeline` instead."
)
def A__ ( SCREAMING_SNAKE_CASE__ = 10 , SCREAMING_SNAKE_CASE__ = 22) -> int:
__snake_case: Union[str, Any] = range(1 , SCREAMING_SNAKE_CASE__)
__snake_case: Tuple = range(1 , SCREAMING_SNAKE_CASE__)
return sum(
1 for power in powers for base in bases if len(str(base**power)) == power)
if __name__ == "__main__":
print(f'{solution(10, 22) = }')
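# A quick sanity check of the counting rule above (Project Euler 63): a hit is an
# n-digit integer that is also an n-th power. 9**5 = 59049 has exactly 5 digits and
# is counted; bases >= 10 never qualify because 10**n already has n + 1 digits.
assert len(str(9 ** 5)) == 5
assert len(str(10 ** 2)) == 3  # not 2, so base 10 can never contribute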
import argparse
from collections import defaultdict
import yaml
__UpperCAmelCase : int = "docs/source/en/_toctree.yml"
def A__ ( SCREAMING_SNAKE_CASE__) -> Dict:
__snake_case: Union[str, Any] = defaultdict(SCREAMING_SNAKE_CASE__)
for doc in model_doc:
counts[doc["local"]] += 1
__snake_case: Dict = [key for key, value in counts.items() if value > 1]
__snake_case: Optional[Any] = []
for duplicate_key in duplicates:
__snake_case: Tuple = list({doc["""title"""] for doc in model_doc if doc["""local"""] == duplicate_key})
if len(SCREAMING_SNAKE_CASE__) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
"""`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the """
"""others.""")
# Only add this once
new_doc.append({"""local""": duplicate_key, """title""": titles[0]})
# Add non-duplicate keys
new_doc.extend([doc for doc in model_doc if counts[doc["""local"""]] == 1])
# Sort
return sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__: s["title"].lower())
def A__ ( SCREAMING_SNAKE_CASE__=False) -> List[str]:
with open(SCREAMING_SNAKE_CASE__ , encoding="""utf-8""") as f:
__snake_case: Optional[int] = yaml.safe_load(f.read())
# Get to the API doc
__snake_case: Dict = 0
while content[api_idx]["title"] != "API":
api_idx += 1
__snake_case: str = content[api_idx]["""sections"""]
# Then to the model doc
__snake_case: List[Any] = 0
while api_doc[model_idx]["title"] != "Models":
model_idx += 1
__snake_case: Dict = api_doc[model_idx]["""sections"""]
__snake_case: int = [(idx, section) for idx, section in enumerate(SCREAMING_SNAKE_CASE__) if """sections""" in section]
__snake_case: Optional[int] = False
for idx, modality_doc in modalities_docs:
__snake_case: Dict = modality_doc["""sections"""]
__snake_case: List[str] = clean_model_doc_toc(SCREAMING_SNAKE_CASE__)
if old_modality_doc != new_modality_doc:
__snake_case: List[str] = True
if overwrite:
__snake_case: Dict = new_modality_doc
if diff:
if overwrite:
__snake_case: Dict = model_doc
__snake_case: int = api_doc
with open(SCREAMING_SNAKE_CASE__ , """w""" , encoding="""utf-8""") as f:
f.write(yaml.dump(SCREAMING_SNAKE_CASE__ , allow_unicode=SCREAMING_SNAKE_CASE__))
else:
raise ValueError(
"""The model doc part of the table of content is not properly sorted, run `make style` to fix this.""")
if __name__ == "__main__":
__UpperCAmelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
__UpperCAmelCase : str = parser.parse_args()
check_model_doc(args.fix_and_overwrite)
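# A toy illustration (invented entries, re-imported so the snippet stands alone) of the
# de-duplication rule implemented above: entries sharing a "local" key are collapsed
# when their titles agree, then the result is sorted case-insensitively by title.
from collections import defaultdict

toy_model_doc = [
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/bert", "title": "BERT"},
    {"local": "model_doc/albert", "title": "ALBERT"},
]
toy_counts = defaultdict(int)
for doc in toy_model_doc:
    toy_counts[doc["local"]] += 1
deduped = [doc for doc in toy_model_doc if toy_counts[doc["local"]] == 1]
deduped.append({"local": "model_doc/bert", "title": "BERT"})  # keep exactly one copy
assert [d["title"] for d in sorted(deduped, key=lambda s: s["title"].lower())] == ["ALBERT", "BERT"]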
import argparse
import hashlib # hashlib is only used inside the test function below
import struct
class __snake_case :
'''simple docstring'''
def __init__( self : int , A : str ):
__snake_case: Optional[int] = data
__snake_case: List[str] = [0x67_45_23_01, 0xef_cd_ab_89, 0x98_ba_dc_fe, 0x10_32_54_76, 0xc3_d2_e1_f0]
@staticmethod
def UpperCAmelCase__ ( A : str , A : Any ):
return ((n << b) | (n >> (32 - b))) & 0xff_ff_ff_ff
def UpperCAmelCase__ ( self : Any ):
__snake_case: int = b"""\x80""" + b"""\x00""" * (63 - (len(self.data ) + 8) % 64)
__snake_case: Optional[int] = self.data + padding + struct.pack(""">Q""" , 8 * len(self.data ) )
return padded_data
def UpperCAmelCase__ ( self : Union[str, Any] ):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def UpperCAmelCase__ ( self : List[str] , A : Dict ):
__snake_case: Union[str, Any] = list(struct.unpack(""">16L""" , A ) ) + [0] * 64
for i in range(16 , 80 ):
__snake_case: int = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Dict = self.padding()
__snake_case: Optional[Any] = self.split_blocks()
for block in self.blocks:
__snake_case: List[Any] = self.expand_block(A )
__snake_case: Any = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
__snake_case: List[str] = (b & c) | ((~b) & d)
__snake_case: Union[str, Any] = 0x5a_82_79_99
elif 20 <= i < 40:
__snake_case: str = b ^ c ^ d
__snake_case: Optional[Any] = 0x6e_d9_eb_a1
elif 40 <= i < 60:
__snake_case: str = (b & c) | (b & d) | (c & d)
__snake_case: Dict = 0x8f_1b_bc_dc
elif 60 <= i < 80:
__snake_case: int = b ^ c ^ d
__snake_case: Any = 0xca_62_c1_d6
__snake_case: List[Any] = (
self.rotate(A , 5 ) + f + e + k + expanded_block[i] & 0xff_ff_ff_ff,
a,
self.rotate(A , 30 ),
c,
d,
)
__snake_case: Union[str, Any] = (
self.h[0] + a & 0xff_ff_ff_ff,
self.h[1] + b & 0xff_ff_ff_ff,
self.h[2] + c & 0xff_ff_ff_ff,
self.h[3] + d & 0xff_ff_ff_ff,
self.h[4] + e & 0xff_ff_ff_ff,
)
return ("{:08x}" * 5).format(*self.h )
def A__ ( ) -> Optional[int]:
__snake_case: str = b"""Test String"""
assert SHA1Hash(SCREAMING_SNAKE_CASE__).final_hash() == hashlib.sha1(SCREAMING_SNAKE_CASE__).hexdigest() # noqa: S324
def A__ ( ) -> Union[str, Any]:
__snake_case: List[str] = argparse.ArgumentParser(description="""Process some strings or files""")
parser.add_argument(
"""--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument("""--file""" , dest="""input_file""" , help="""Hash contents of a file""")
__snake_case: Dict = parser.parse_args()
__snake_case: Optional[int] = args.input_string
# In any case, the hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""") as f:
__snake_case: int = f.read()
else:
__snake_case: Any = bytes(SCREAMING_SNAKE_CASE__ , """utf-8""")
print(SHA1Hash(SCREAMING_SNAKE_CASE__).final_hash())
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
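# A standalone restatement of the SHA-1 preprocessing invariant used by the padding
# method above (standard SHA-1, independent of the class): one 0x80 byte, zero fill,
# then the original bit length as a big-endian 64-bit integer, so the padded message
# length is always a multiple of 64 bytes.
import struct

def sha1_pad(data):
    pad = b"\x80" + b"\x00" * (63 - (len(data) + 8) % 64)
    return data + pad + struct.pack(">Q", 8 * len(data))

assert len(sha1_pad(b"Test String")) % 64 == 0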
from __future__ import annotations
from decimal import Decimal
from numpy import array
def A__ ( SCREAMING_SNAKE_CASE__) -> list[list[float]]:
__snake_case: Any = Decimal
# Check if the provided matrix has 2 rows and 2 columns
# since this implementation only works for 2x2 matrices
if len(SCREAMING_SNAKE_CASE__) == 2 and len(matrix[0]) == 2 and len(matrix[1]) == 2:
# Calculate the determinant of the matrix
__snake_case: Tuple = float(
d(matrix[0][0]) * d(matrix[1][1]) - d(matrix[1][0]) * d(matrix[0][1]))
if determinant == 0:
raise ValueError("""This matrix has no inverse.""")
# Creates a copy of the matrix with swapped positions of the elements
__snake_case: Optional[int] = [[0.0, 0.0], [0.0, 0.0]]
__snake_case , __snake_case: Optional[Any] = matrix[1][1], matrix[0][0]
__snake_case , __snake_case: Union[str, Any] = -matrix[1][0], -matrix[0][1]
# Calculate the inverse of the matrix
return [
[(float(d(SCREAMING_SNAKE_CASE__)) / determinant) or 0.0 for n in row] for row in swapped_matrix
]
elif (
len(SCREAMING_SNAKE_CASE__) == 3
and len(matrix[0]) == 3
and len(matrix[1]) == 3
and len(matrix[2]) == 3
):
# Calculate the determinant of the matrix using Sarrus rule
__snake_case: Any = float(
(
(d(matrix[0][0]) * d(matrix[1][1]) * d(matrix[2][2]))
+ (d(matrix[0][1]) * d(matrix[1][2]) * d(matrix[2][0]))
+ (d(matrix[0][2]) * d(matrix[1][0]) * d(matrix[2][1]))
)
- (
(d(matrix[0][2]) * d(matrix[1][1]) * d(matrix[2][0]))
+ (d(matrix[0][1]) * d(matrix[1][0]) * d(matrix[2][2]))
+ (d(matrix[0][0]) * d(matrix[1][2]) * d(matrix[2][1]))
))
if determinant == 0:
raise ValueError("""This matrix has no inverse.""")
# Creating cofactor matrix
__snake_case: Tuple = [
[d(0.0), d(0.0), d(0.0)],
[d(0.0), d(0.0), d(0.0)],
[d(0.0), d(0.0), d(0.0)],
]
__snake_case: Dict = (d(matrix[1][1]) * d(matrix[2][2])) - (
d(matrix[1][2]) * d(matrix[2][1])
)
__snake_case: Tuple = -(
(d(matrix[1][0]) * d(matrix[2][2])) - (d(matrix[1][2]) * d(matrix[2][0]))
)
__snake_case: Optional[int] = (d(matrix[1][0]) * d(matrix[2][1])) - (
d(matrix[1][1]) * d(matrix[2][0])
)
__snake_case: Union[str, Any] = -(
(d(matrix[0][1]) * d(matrix[2][2])) - (d(matrix[0][2]) * d(matrix[2][1]))
)
__snake_case: str = (d(matrix[0][0]) * d(matrix[2][2])) - (
d(matrix[0][2]) * d(matrix[2][0])
)
__snake_case: List[Any] = -(
(d(matrix[0][0]) * d(matrix[2][1])) - (d(matrix[0][1]) * d(matrix[2][0]))
)
__snake_case: Optional[Any] = (d(matrix[0][1]) * d(matrix[1][2])) - (
d(matrix[0][2]) * d(matrix[1][1])
)
__snake_case: List[str] = -(
(d(matrix[0][0]) * d(matrix[1][2])) - (d(matrix[0][2]) * d(matrix[1][0]))
)
__snake_case: Optional[int] = (d(matrix[0][0]) * d(matrix[1][1])) - (
d(matrix[0][1]) * d(matrix[1][0])
)
# Transpose the cofactor matrix (Adjoint matrix)
__snake_case: List[Any] = array(SCREAMING_SNAKE_CASE__)
for i in range(3):
for j in range(3):
__snake_case: Tuple = cofactor_matrix[j][i]
# Inverse of the matrix using the formula (1/determinant) * adjoint matrix
__snake_case: List[Any] = array(SCREAMING_SNAKE_CASE__)
for i in range(3):
for j in range(3):
inverse_matrix[i][j] /= d(SCREAMING_SNAKE_CASE__)
# Calculate the inverse of the matrix
return [[float(d(SCREAMING_SNAKE_CASE__)) or 0.0 for n in row] for row in inverse_matrix]
raise ValueError("""Please provide a matrix of size 2x2 or 3x3.""")
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that `from diffusers.pipelines import DiffusionPipeline` temporarily keeps working
from ...utils import deprecate
from ..controlnet.multicontrolnet import MultiControlNetModel # noqa: F401
from ..controlnet.pipeline_controlnet import StableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `StableDiffusionControlNetPipeline` or `MultiControlNetModel` from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import StableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
import math
def A__ ( SCREAMING_SNAKE_CASE__) -> int:
if not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__):
__snake_case: Optional[int] = F'''Input value of [number={number}] must be an integer'''
raise TypeError(SCREAMING_SNAKE_CASE__)
if number < 1:
__snake_case: Optional[int] = F'''Input value of [number={number}] must be > 0'''
raise ValueError(SCREAMING_SNAKE_CASE__)
elif number == 1:
return 3
elif number == 2:
return 5
else:
__snake_case: List[Any] = int(math.log(number // 3 , 2)) + 2
__snake_case: str = [3, 5]
__snake_case: int = 2
__snake_case: List[str] = 3
for block in range(1 , SCREAMING_SNAKE_CASE__):
for _ in range(SCREAMING_SNAKE_CASE__):
proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
proth_index += 1
increment *= 2
return proth_list[number - 1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for number in range(11):
__UpperCAmelCase : Optional[int] = 0
try:
__UpperCAmelCase : int = proth(number)
except ValueError:
print(f'ValueError: there is no {number}th Proth number')
continue
print(f'The {number}th Proth number: {value}')
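# A brute-force cross-check of the block construction above, assuming the standard
# definition of Proth numbers: k * 2**n + 1 with odd k < 2**n. The first few values
# should be 3, 5, 9, 13, 17, 25, ...
def proth_bruteforce(limit):
    found = set()
    for n in range(1, 12):
        for k in range(1, 2 ** n, 2):  # odd k strictly below 2**n
            found.add(k * 2 ** n + 1)
    return sorted(found)[:limit]

assert proth_bruteforce(6) == [3, 5, 9, 13, 17, 25]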
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
__UpperCAmelCase : int = logging.getLogger()
def A__ ( SCREAMING_SNAKE_CASE__) -> Dict:
__snake_case: int = {}
__snake_case: List[str] = os.path.join(SCREAMING_SNAKE_CASE__ , """all_results.json""")
if os.path.exists(SCREAMING_SNAKE_CASE__):
with open(SCREAMING_SNAKE_CASE__ , """r""") as f:
__snake_case: List[Any] = json.load(SCREAMING_SNAKE_CASE__)
else:
raise ValueError(F'''can\'t find {path}''')
return results
__UpperCAmelCase : int = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Union[str, Any] ):
import xla_spawn
__snake_case: Optional[Any] = self.get_auto_remove_tmp_dir()
__snake_case: List[Any] = f'''
./examples/pytorch/text-classification/run_glue.py
--num_cores=8
./examples/pytorch/text-classification/run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--debug tpu_metrics_debug
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
'''.split()
with patch.object(A , """argv""" , A ):
__snake_case: Any = time()
xla_spawn.main()
__snake_case: str = time()
__snake_case: str = get_results(A )
self.assertGreaterEqual(result["""eval_accuracy"""] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def UpperCAmelCase__ ( self : Union[str, Any] ):
import xla_spawn
__snake_case: Optional[int] = """
./tests/test_trainer_tpu.py
--num_cores=8
./tests/test_trainer_tpu.py
""".split()
with patch.object(A , """argv""" , A ):
xla_spawn.main()
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = 42
class __snake_case ( __lowerCamelCase , __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = True
@register_to_config
def __init__( self : Union[str, Any] , A : int = 3 , A : int = 3 , A : Tuple[str] = ("DownEncoderBlock2D",) , A : Tuple[str] = ("UpDecoderBlock2D",) , A : Tuple[int] = (64,) , A : int = 1 , A : str = "silu" , A : int = 4 , A : int = 32 , A : int = 32 , A : float = 0.1_8215 , ):
super().__init__()
# pass init params to Encoder
__snake_case: Any = Encoder(
in_channels=A , out_channels=A , down_block_types=A , block_out_channels=A , layers_per_block=A , act_fn=A , norm_num_groups=A , double_z=A , )
# pass init params to Decoder
__snake_case: int = Decoder(
in_channels=A , out_channels=A , up_block_types=A , block_out_channels=A , layers_per_block=A , norm_num_groups=A , act_fn=A , )
__snake_case: Dict = nn.Conv2d(2 * latent_channels , 2 * latent_channels , 1 )
__snake_case: int = nn.Conv2d(A , A , 1 )
__snake_case: List[str] = False
__snake_case: Optional[int] = False
# only relevant if vae tiling is enabled
__snake_case: Any = self.config.sample_size
__snake_case: int = (
self.config.sample_size[0]
if isinstance(self.config.sample_size , (list, tuple) )
else self.config.sample_size
)
__snake_case: Union[str, Any] = int(sample_size / (2 ** (len(self.config.block_out_channels ) - 1)) )
__snake_case: Optional[int] = 0.25
def UpperCAmelCase__ ( self : int , A : List[str] , A : Optional[Any]=False ):
if isinstance(A , (Encoder, Decoder) ):
__snake_case: str = value
def UpperCAmelCase__ ( self : str , A : bool = True ):
__snake_case: Union[str, Any] = use_tiling
def UpperCAmelCase__ ( self : Optional[int] ):
self.enable_tiling(A )
def UpperCAmelCase__ ( self : List[Any] ):
__snake_case: List[str] = True
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[str] = False
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Any = {}
def fn_recursive_add_processors(A : str , A : torch.nn.Module , A : Dict[str, AttentionProcessor] ):
if hasattr(A , """set_processor""" ):
__snake_case: List[Any] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , A , A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(A , A , A )
return processors
def UpperCAmelCase__ ( self : Optional[int] , A : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
__snake_case: Any = len(self.attn_processors.keys() )
if isinstance(A , A ) and len(A ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(A )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(A : str , A : torch.nn.Module , A : Optional[Any] ):
if hasattr(A , """set_processor""" ):
if not isinstance(A , A ):
module.set_processor(A )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , A , A )
for name, module in self.named_children():
fn_recursive_attn_processor(A , A , A )
def UpperCAmelCase__ ( self : List[str] ):
self.set_attn_processor(AttnProcessor() )
@apply_forward_hook
def UpperCAmelCase__ ( self : Optional[Any] , A : torch.FloatTensor , A : bool = True ):
if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
return self.tiled_encode(A , return_dict=A )
if self.use_slicing and x.shape[0] > 1:
__snake_case: List[Any] = [self.encoder(A ) for x_slice in x.split(1 )]
__snake_case: Optional[Any] = torch.cat(A )
else:
__snake_case: str = self.encoder(A )
__snake_case: Any = self.quant_conv(A )
__snake_case: Tuple = DiagonalGaussianDistribution(A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=A )
def UpperCAmelCase__ ( self : Tuple , A : torch.FloatTensor , A : bool = True ):
if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
return self.tiled_decode(A , return_dict=A )
__snake_case: Optional[int] = self.post_quant_conv(A )
__snake_case: Union[str, Any] = self.decoder(A )
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
@apply_forward_hook
def UpperCAmelCase__ ( self : Tuple , A : torch.FloatTensor , A : bool = True ):
if self.use_slicing and z.shape[0] > 1:
__snake_case: Union[str, Any] = [self._decode(A ).sample for z_slice in z.split(1 )]
__snake_case: List[str] = torch.cat(A )
else:
__snake_case: int = self._decode(A ).sample
if not return_dict:
return (decoded,)
return DecoderOutput(sample=A )
def UpperCAmelCase__ ( self : Any , A : Tuple , A : int , A : List[Any] ):
__snake_case: int = min(a.shape[2] , b.shape[2] , A )
for y in range(A ):
__snake_case: Dict = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
return b
def UpperCAmelCase__ ( self : Union[str, Any] , A : Optional[Any] , A : List[str] , A : List[str] ):
__snake_case: Dict = min(a.shape[3] , b.shape[3] , A )
for x in range(A ):
__snake_case: Tuple = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
return b
def UpperCAmelCase__ ( self : int , A : torch.FloatTensor , A : bool = True ):
__snake_case: List[str] = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor) )
__snake_case: Dict = int(self.tile_latent_min_size * self.tile_overlap_factor )
__snake_case: Dict = self.tile_latent_min_size - blend_extent
# Split the image into 512x512 tiles and encode them separately.
__snake_case: Optional[int] = []
for i in range(0 , x.shape[2] , A ):
__snake_case: Optional[int] = []
for j in range(0 , x.shape[3] , A ):
__snake_case: int = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
__snake_case: Tuple = self.encoder(A )
__snake_case: Dict = self.quant_conv(A )
row.append(A )
rows.append(A )
__snake_case: Tuple = []
for i, row in enumerate(A ):
__snake_case: str = []
for j, tile in enumerate(A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__snake_case: Optional[Any] = self.blend_v(rows[i - 1][j] , A , A )
if j > 0:
__snake_case: Tuple = self.blend_h(row[j - 1] , A , A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(A , dim=3 ) )
__snake_case: Tuple = torch.cat(A , dim=2 )
__snake_case: Optional[int] = DiagonalGaussianDistribution(A )
if not return_dict:
return (posterior,)
return AutoencoderKLOutput(latent_dist=A )
def UpperCAmelCase__ ( self : Union[str, Any] , A : torch.FloatTensor , A : bool = True ):
__snake_case: Optional[Any] = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor) )
__snake_case: str = int(self.tile_sample_min_size * self.tile_overlap_factor )
__snake_case: int = self.tile_sample_min_size - blend_extent
# Split z into overlapping 64x64 tiles and decode them separately.
# The tiles have an overlap to avoid seams between tiles.
__snake_case: List[Any] = []
for i in range(0 , z.shape[2] , A ):
__snake_case: Optional[Any] = []
for j in range(0 , z.shape[3] , A ):
__snake_case: Dict = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
__snake_case: Any = self.post_quant_conv(A )
__snake_case: Optional[Any] = self.decoder(A )
row.append(A )
rows.append(A )
__snake_case: Optional[Any] = []
for i, row in enumerate(A ):
__snake_case: Optional[Any] = []
for j, tile in enumerate(A ):
# blend the above tile and the left tile
# to the current tile and add the current tile to the result row
if i > 0:
__snake_case: Tuple = self.blend_v(rows[i - 1][j] , A , A )
if j > 0:
__snake_case: List[str] = self.blend_h(row[j - 1] , A , A )
result_row.append(tile[:, :, :row_limit, :row_limit] )
result_rows.append(torch.cat(A , dim=3 ) )
__snake_case: Dict = torch.cat(A , dim=2 )
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
def UpperCAmelCase__ ( self : List[Any] , A : torch.FloatTensor , A : bool = False , A : bool = True , A : Optional[torch.Generator] = None , ):
__snake_case: Optional[Any] = sample
__snake_case: Union[str, Any] = self.encode(A ).latent_dist
if sample_posterior:
__snake_case: Optional[Any] = posterior.sample(generator=A )
else:
__snake_case: Dict = posterior.mode()
__snake_case: Any = self.decode(A ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=A )
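# A minimal numeric sketch of the seam-blending rule used by blend_h/blend_v above:
# across the overlap, the left tile fades out linearly while the right tile fades in,
# so column x gets weight (1 - x / blend_extent) for tile `a` and x / blend_extent
# for tile `b`.
import torch

a = torch.ones(1, 1, 4, 4)   # left tile
b = torch.zeros(1, 1, 4, 4)  # right tile
blend_extent = 4
for x in range(blend_extent):
    b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
assert b[0, 0, 0].tolist() == [1.0, 0.75, 0.5, 0.25]  # linear ramp across the seam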
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def A__ ( SCREAMING_SNAKE_CASE__) -> List[str]:
return EnvironmentCommand()
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
@staticmethod
def UpperCAmelCase__ ( A : ArgumentParser ):
__snake_case: Optional[Any] = parser.add_parser("""env""" )
download_parser.set_defaults(func=A )
def UpperCAmelCase__ ( self : List[str] ):
__snake_case: List[str] = huggingface_hub.__version__
__snake_case: Optional[int] = """not installed"""
__snake_case: str = """NA"""
if is_torch_available():
import torch
__snake_case: Tuple = torch.__version__
__snake_case: Optional[int] = torch.cuda.is_available()
__snake_case: int = """not installed"""
if is_transformers_available():
import transformers
__snake_case: List[Any] = transformers.__version__
__snake_case: Optional[Any] = """not installed"""
if is_accelerate_available():
import accelerate
__snake_case: Tuple = accelerate.__version__
__snake_case: Union[str, Any] = """not installed"""
if is_xformers_available():
import xformers
__snake_case: int = xformers.__version__
__snake_case: int = {
"""`diffusers` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""PyTorch version (GPU?)""": f'''{pt_version} ({pt_cuda_available})''',
"""Huggingface_hub version""": hub_version,
"""Transformers version""": transformers_version,
"""Accelerate version""": accelerate_version,
"""xFormers version""": xformers_version,
"""Using GPU in script?""": """<fill in>""",
"""Using distributed or parallel set-up in script?""": """<fill in>""",
}
print("""\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n""" )
print(self.format_dict(A ) )
return info
@staticmethod
def UpperCAmelCase__ ( A : List[Any] ):
return "\n".join([f'''- {prop}: {val}''' for prop, val in d.items()] ) + "\n"
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase : Optional[Any] = logging.get_logger(__name__)
__UpperCAmelCase : Union[str, Any] = {
"asapp/sew-d-tiny-100k": "https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json",
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = """sew-d"""
def __init__( self : Dict , A : Any=32 , A : Dict=768 , A : Optional[Any]=12 , A : Union[str, Any]=12 , A : Union[str, Any]=3_072 , A : Optional[Any]=2 , A : Union[str, Any]=512 , A : List[Any]=256 , A : Dict=True , A : Union[str, Any]=True , A : Optional[int]=("p2c", "c2p") , A : str="layer_norm" , A : Dict="gelu_python" , A : Tuple=0.1 , A : Any=0.1 , A : Tuple=0.1 , A : Optional[int]=0.0 , A : Any=0.1 , A : Any=0.02 , A : Dict=1E-7 , A : str=1E-5 , A : int="group" , A : int="gelu" , A : str=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) , A : Union[str, Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , A : List[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , A : Optional[int]=False , A : int=128 , A : int=16 , A : Optional[Any]=True , A : List[Any]=0.05 , A : Any=10 , A : Dict=2 , A : List[Any]=0.0 , A : Union[str, Any]=10 , A : int=0 , A : List[Any]="mean" , A : Union[str, Any]=False , A : Any=False , A : Optional[int]=256 , A : List[Any]=0 , A : Any=1 , A : List[Any]=2 , **A : List[Any] , ):
super().__init__(**A , pad_token_id=A , bos_token_id=A , eos_token_id=A )
__snake_case: Optional[int] = hidden_size
__snake_case: str = feat_extract_norm
__snake_case: int = feat_extract_activation
__snake_case: str = list(A )
__snake_case: Any = list(A )
__snake_case: str = list(A )
__snake_case: Union[str, Any] = conv_bias
__snake_case: int = num_conv_pos_embeddings
__snake_case: str = num_conv_pos_embedding_groups
__snake_case: List[Any] = len(self.conv_dim )
__snake_case: List[str] = num_hidden_layers
__snake_case: Union[str, Any] = intermediate_size
__snake_case: Dict = squeeze_factor
__snake_case: List[Any] = max_position_embeddings
__snake_case: List[Any] = position_buckets
__snake_case: List[str] = share_att_key
__snake_case: int = relative_attention
__snake_case: Union[str, Any] = norm_rel_ebd
__snake_case: List[str] = list(A )
__snake_case: Tuple = hidden_act
__snake_case: List[Any] = num_attention_heads
__snake_case: str = hidden_dropout
__snake_case: int = attention_dropout
__snake_case: Dict = activation_dropout
__snake_case: Any = feat_proj_dropout
__snake_case: int = final_dropout
__snake_case: List[Any] = layer_norm_eps
__snake_case: List[str] = feature_layer_norm_eps
__snake_case: List[Any] = initializer_range
__snake_case: List[Any] = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f'''but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)'''
f'''= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case: List[Any] = apply_spec_augment
__snake_case: List[Any] = mask_time_prob
__snake_case: str = mask_time_length
__snake_case: List[str] = mask_time_min_masks
__snake_case: str = mask_feature_prob
__snake_case: Optional[int] = mask_feature_length
__snake_case: Dict = mask_feature_min_masks
# ctc loss
__snake_case: Any = ctc_loss_reduction
__snake_case: str = ctc_zero_infinity
# sequence classification
__snake_case: Optional[Any] = use_weighted_layer_sum
__snake_case: List[Any] = classifier_proj_size
@property
def UpperCAmelCase__ ( self : int ):
return functools.reduce(operator.mul , self.conv_stride , 1 )
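# The property above reduces the conv strides to their product, i.e. the overall
# downsampling factor of the feature extractor. With the default strides from the
# signature this is 5 * 2**6 = 320 input samples per output frame:
import functools, operator

default_conv_stride = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
assert functools.reduce(operator.mul, default_conv_stride, 1) == 320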
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> List[str]:
print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""")
for i in range(SCREAMING_SNAKE_CASE__):
for j in range(SCREAMING_SNAKE_CASE__):
if dist[i][j] != float("""inf"""):
print(int(dist[i][j]) , end="""\t""")
else:
print("""INF""" , end="""\t""")
print()
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__) -> Dict:
__snake_case: List[Any] = [[float("""inf""") for _ in range(SCREAMING_SNAKE_CASE__)] for _ in range(SCREAMING_SNAKE_CASE__)]
for i in range(SCREAMING_SNAKE_CASE__):
for j in range(SCREAMING_SNAKE_CASE__):
__snake_case: str = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(SCREAMING_SNAKE_CASE__):
# looping through rows of graph array
for i in range(SCREAMING_SNAKE_CASE__):
# looping through columns of graph array
for j in range(SCREAMING_SNAKE_CASE__):
if (
dist[i][k] != float("""inf""")
and dist[k][j] != float("""inf""")
and dist[i][k] + dist[k][j] < dist[i][j]
):
__snake_case: Optional[int] = dist[i][k] + dist[k][j]
_print_dist(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
return dist, v
if __name__ == "__main__":
__UpperCAmelCase : int = int(input("Enter number of vertices: "))
__UpperCAmelCase : Optional[Any] = int(input("Enter number of edges: "))
__UpperCAmelCase : List[str] = [[float("inf") for i in range(v)] for j in range(v)]
for i in range(v):
__UpperCAmelCase : Union[str, Any] = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("\nEdge ", i + 1)
__UpperCAmelCase : Optional[Any] = int(input("Enter source:"))
__UpperCAmelCase : Optional[int] = int(input("Enter destination:"))
__UpperCAmelCase : Union[str, Any] = float(input("Enter weight:"))
__UpperCAmelCase : str = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected output from the vertex, edge and src, dst, weight inputs
# 0 INF INF
# INF 0 2
# INF 1 0
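# A standalone check of the relaxation rule above on the commented example: with
# edges 1->2 (weight 2) and 2->1 (weight 1), vertex 0 stays unreachable and the
# direct distances survive, reproducing the expected output.
INF = float("inf")
g = [[0.0, INF, INF], [INF, 0.0, 2.0], [INF, 1.0, 0.0]]
for k in range(3):
    for i in range(3):
        for j in range(3):
            if g[i][k] + g[k][j] < g[i][j]:
                g[i][j] = g[i][k] + g[k][j]
assert g[1][2] == 2.0 and g[2][1] == 1.0 and g[0][1] == INF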
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__UpperCAmelCase : Any = random.Random()
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=1.0 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None) -> Any:
if rng is None:
__snake_case: Dict = global_rng
__snake_case: str = []
for batch_idx in range(shape[0]):
values.append([])
for _ in range(shape[1]):
values[-1].append(rng.random() * scale)
return values
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int , A : List[str] , A : List[Any]=7 , A : Optional[int]=400 , A : List[Any]=2_000 , A : Dict=2_048 , A : Tuple=128 , A : List[Any]=1 , A : Tuple=512 , A : str=30 , A : Optional[Any]=44_100 , ):
__snake_case: Dict = parent
__snake_case: Optional[Any] = batch_size
__snake_case: Optional[int] = min_seq_length
__snake_case: Optional[Any] = max_seq_length
__snake_case: List[str] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__snake_case: Any = spectrogram_length
__snake_case: Any = feature_size
__snake_case: Union[str, Any] = num_audio_channels
__snake_case: Any = hop_length
__snake_case: List[str] = chunk_length
__snake_case: Any = sampling_rate
def UpperCAmelCase__ ( self : List[Any] ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCAmelCase__ ( self : List[str] , A : str=False , A : int=False ):
def _flatten(A : Dict ):
return list(itertools.chain(*A ) )
if equal_length:
__snake_case: List[str] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__snake_case: int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__snake_case: Tuple = [np.asarray(A ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __snake_case ( __lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = TvltFeatureExtractor
def UpperCAmelCase__ ( self : Union[str, Any] ):
__snake_case: str = TvltFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : int ):
__snake_case: Tuple = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(A , """spectrogram_length""" ) )
self.assertTrue(hasattr(A , """feature_size""" ) )
self.assertTrue(hasattr(A , """num_audio_channels""" ) )
self.assertTrue(hasattr(A , """hop_length""" ) )
self.assertTrue(hasattr(A , """chunk_length""" ) )
self.assertTrue(hasattr(A , """sampling_rate""" ) )
def UpperCAmelCase__ ( self : Any ):
__snake_case: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: Tuple = feat_extract_first.save_pretrained(A )[0]
check_json_file_has_correct_format(A )
__snake_case: int = self.feature_extraction_class.from_pretrained(A )
__snake_case: List[str] = feat_extract_first.to_dict()
__snake_case: str = feat_extract_second.to_dict()
__snake_case: List[Any] = dict_first.pop("""mel_filters""" )
__snake_case: str = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Optional[Any] ):
__snake_case: str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__snake_case: str = os.path.join(A , """feat_extract.json""" )
feat_extract_first.to_json_file(A )
__snake_case: List[Any] = self.feature_extraction_class.from_json_file(A )
__snake_case: Dict = feat_extract_first.to_dict()
__snake_case: Any = feat_extract_second.to_dict()
__snake_case: int = dict_first.pop("""mel_filters""" )
__snake_case: int = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(A , A ) )
self.assertEqual(A , A )
def UpperCAmelCase__ ( self : Any ):
# Initialize feature_extractor
__snake_case: Union[str, Any] = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__snake_case: Dict = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
__snake_case: str = [np.asarray(A ) for speech_input in speech_inputs]
# Test not batched input
__snake_case: int = feature_extractor(np_speech_inputs[0] , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__snake_case: Optional[int] = feature_extractor(A , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__snake_case: Union[str, Any] = feature_extractor(
A , return_tensors="""np""" , sampling_rate=44_100 , mask_audio=A ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__snake_case: Any = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__snake_case: Union[str, Any] = np.asarray(A )
__snake_case: List[Any] = feature_extractor(A , return_tensors="""np""" , sampling_rate=44_100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCAmelCase__ ( self : Union[str, Any] , A : List[str] ):
__snake_case: Tuple = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" , """clean""" , split="""validation""" )
# automatic decoding with librispeech
__snake_case: List[Any] = ds.sort("""id""" ).select(range(A ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = self._load_datasamples(1 )
__snake_case: Optional[int] = TvltFeatureExtractor()
__snake_case: Optional[Any] = feature_extractor(A , return_tensors="""pt""" ).audio_values
self.assertEqual(audio_values.shape , (1, 1, 192, 128) )
__snake_case: str = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , A , atol=1E-4 ) )
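# A quick arithmetic check of the "inputs increase in size" rule used by the tester's
# input-preparation helper above: with min_seq_length=400, max_seq_length=2000 and
# batch_size=7, seq_length_diff is (2000 - 400) // (7 - 1) = 266, giving exactly one
# length per batch element.
lengths = list(range(400, 2000, (2000 - 400) // (7 - 1)))
assert lengths == [400, 666, 932, 1198, 1464, 1730, 1996]
assert len(lengths) == 7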