code stringlengths 82 53.2k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
from __future__ import annotations
from collections.abc import Generator
def a__ () -> Generator[int, None, None]:
_A : dict[int, int] = {}
_A : Union[str, Any] = 2
while True:
_A : Optional[int] = factor_map.pop(__lowercase , __lowercase )
if factor:
_A : Dict = factor + prime
while x in factor_map:
x += factor
_A : Dict = factor
else:
_A : Union[str, Any] = prime
yield prime
prime += 1
def a__ (__lowercase :float = 1e10 ) -> int:
_A : int = sieve()
_A : str = 1
while True:
_A : Any = next(__lowercase )
if (2 * prime * n) > limit:
return n
# Ignore the next prime as the reminder will be 2.
next(__lowercase )
n += 2
if __name__ == "__main__":
print(solution())
| 206 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : List[Any] =logging.get_logger(__name__)
_UpperCamelCase : Optional[Any] ={
'google/pegasus-large': 'https://huggingface.co/google/pegasus-large/resolve/main/config.json',
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class UpperCAmelCase__ ( __snake_case ):
__snake_case : Optional[int] = "pegasus"
__snake_case : List[str] = ["past_key_values"]
__snake_case : Union[str, Any] = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self ,A__=50265 ,A__=1024 ,A__=12 ,A__=4096 ,A__=16 ,A__=12 ,A__=4096 ,A__=16 ,A__=0.0 ,A__=0.0 ,A__=True ,A__=True ,A__="gelu" ,A__=1024 ,A__=0.1 ,A__=0.0 ,A__=0.0 ,A__=0.02 ,A__=0 ,A__=False ,A__=0 ,A__=1 ,A__=1 ,**A__ ,):
_A : List[str] = vocab_size
_A : List[Any] = max_position_embeddings
_A : Union[str, Any] = d_model
_A : Union[str, Any] = encoder_ffn_dim
_A : Union[str, Any] = encoder_layers
_A : int = encoder_attention_heads
_A : List[Any] = decoder_ffn_dim
_A : Optional[int] = decoder_layers
_A : Tuple = decoder_attention_heads
_A : List[str] = dropout
_A : Tuple = attention_dropout
_A : Union[str, Any] = activation_dropout
_A : int = activation_function
_A : Union[str, Any] = init_std
_A : Any = encoder_layerdrop
_A : int = decoder_layerdrop
_A : Tuple = use_cache
_A : Dict = encoder_layers
_A : List[str] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=A__ ,eos_token_id=A__ ,is_encoder_decoder=A__ ,decoder_start_token_id=A__ ,forced_eos_token_id=A__ ,**A__ ,)
@property
def A__ ( self ):
return self.encoder_attention_heads
@property
def A__ ( self ):
return self.d_model
| 206 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""microsoft/resnet-50""": """https://huggingface.co/microsoft/resnet-50/blob/main/config.json""",
}
class _a ( lowercase_ , lowercase_ ):
'''simple docstring'''
UpperCamelCase__ = """resnet"""
UpperCamelCase__ = ["""basic""", """bottleneck"""]
def __init__( self , UpperCAmelCase_=3 , UpperCAmelCase_=64 , UpperCAmelCase_=[256, 512, 1_024, 2_048] , UpperCAmelCase_=[3, 4, 6, 3] , UpperCAmelCase_="bottleneck" , UpperCAmelCase_="relu" , UpperCAmelCase_=False , UpperCAmelCase_=None , UpperCAmelCase_=None , **UpperCAmelCase_ , ) -> Any:
'''simple docstring'''
super().__init__(**UpperCAmelCase_)
if layer_type not in self.layer_types:
raise ValueError(F"""layer_type={layer_type} is not one of {','.join(self.layer_types)}""")
lowercase__: Union[str, Any] = num_channels
lowercase__: Tuple = embedding_size
lowercase__: Union[str, Any] = hidden_sizes
lowercase__: Union[str, Any] = depths
lowercase__: List[str] = layer_type
lowercase__: Tuple = hidden_act
lowercase__: Tuple = downsample_in_first_stage
lowercase__: int = ["stem"] + [F"""stage{idx}""" for idx in range(1 , len(UpperCAmelCase_) + 1)]
lowercase__ , lowercase__: Any = get_aligned_output_features_output_indices(
out_features=UpperCAmelCase_ , out_indices=UpperCAmelCase_ , stage_names=self.stage_names)
class _a ( lowercase_ ):
'''simple docstring'''
UpperCamelCase__ = version.parse("""1.11""" )
@property
def __lowercase ( self) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
])
@property
def __lowercase ( self) -> float:
'''simple docstring'''
return 1E-3
| 120 |
"""simple docstring"""
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
UpperCamelCase = {
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class _a ( lowercase_ ):
'''simple docstring'''
UpperCamelCase__ = """ernie_m"""
UpperCamelCase__ = {"dropout": "classifier_dropout", "num_classes": "num_labels"}
def __init__( self , UpperCAmelCase_ = 250_002 , UpperCAmelCase_ = 768 , UpperCAmelCase_ = 12 , UpperCAmelCase_ = 12 , UpperCAmelCase_ = 3_072 , UpperCAmelCase_ = "gelu" , UpperCAmelCase_ = 0.1 , UpperCAmelCase_ = 0.1 , UpperCAmelCase_ = 514 , UpperCAmelCase_ = 0.02 , UpperCAmelCase_ = 1 , UpperCAmelCase_ = 1E-0_5 , UpperCAmelCase_=None , UpperCAmelCase_=False , UpperCAmelCase_=0.0 , **UpperCAmelCase_ , ) -> Dict:
'''simple docstring'''
super().__init__(pad_token_id=UpperCAmelCase_ , **UpperCAmelCase_)
lowercase__: Union[str, Any] = vocab_size
lowercase__: List[Any] = hidden_size
lowercase__: List[Any] = num_hidden_layers
lowercase__: Tuple = num_attention_heads
lowercase__: Optional[int] = intermediate_size
lowercase__: List[Any] = hidden_act
lowercase__: Optional[Any] = hidden_dropout_prob
lowercase__: str = attention_probs_dropout_prob
lowercase__: Tuple = max_position_embeddings
lowercase__: str = initializer_range
lowercase__: List[Any] = layer_norm_eps
lowercase__: List[str] = classifier_dropout
lowercase__: Optional[Any] = is_decoder
lowercase__: Tuple = act_dropout
| 120 | 1 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
_lowerCAmelCase = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow("", "|", "|"),
datarow=DataRow("", "|", "|"),
padding=1,
with_header_hide=None,
)
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = {"type": "section", "text": {"type": "plain_text", "text": "No failed tests! 🤗", "emoji": True}}
_lowerCAmelCase = [
{
"type": "header",
"text": {
"type": "plain_text",
"text": f'🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results',
"emoji": True,
},
}
]
_lowerCAmelCase = 0
for log in Path().glob("*.log"):
_lowerCAmelCase = 0
with open(log, "r") as f:
for line in f:
_lowerCAmelCase = json.loads(line)
if line.get("nodeid", "") != "":
_lowerCAmelCase = line["nodeid"]
if line.get("duration", None) is not None:
_lowerCAmelCase = f'{line["duration"]:.4f}'
if line.get("outcome", "") == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split("_")[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
_lowerCAmelCase = []
log.unlink()
_lowerCAmelCase = ""
_lowerCAmelCase = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
_lowerCAmelCase = []
_lowerCAmelCase = {}
for test in failed_tests:
_lowerCAmelCase = test[0].split("::")
_lowerCAmelCase = data[0].split("/")[-1]
if data[0] not in filesafailed:
_lowerCAmelCase = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
_lowerCAmelCase = [test[0] for test in failed_table]
_lowerCAmelCase = list(set(files))
# Count number of instances in failed_tests
_lowerCAmelCase = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
_lowerCAmelCase = tabulate(
table,
headers=["Test Location", "Num Failed"],
tablefmt=hf_table_format,
stralign="right",
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
_lowerCAmelCase = "Too many failed tests, please see the full report in the Action results."
_lowerCAmelCase = len(err) + 10
_lowerCAmelCase = message[: 3_000 - offset] + f'\n...\n```\n{err}'
print(f'### {message}')
else:
_lowerCAmelCase = "No failed tests! 🤗"
print(f'## {message}')
payload.append(no_error_payload)
if os.environ.get("TEST_TYPE", "") != "":
from slack_sdk import WebClient
_lowerCAmelCase = WebClient(token=os.environ["SLACK_API_TOKEN"])
if message != "No failed tests! 🤗":
_lowerCAmelCase = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": message,
},
}
payload.append(md_report)
_lowerCAmelCase = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": "*For more details:*",
},
"accessory": {
"type": "button",
"text": {
"type": "plain_text",
"text": "Check Action results",
"emoji": True,
},
"url": f'https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}',
},
}
payload.append(action_button)
_lowerCAmelCase = {
"type": "context",
"elements": [
{
"type": "plain_text",
"text": f'Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}',
}
],
}
payload.append(date_report)
_lowerCAmelCase = client.chat_postMessage(channel="#accelerate-ci-daily", text=message, blocks=payload)
_lowerCAmelCase = response.data["ts"]
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
_lowerCAmelCase = ""
for i, row in enumerate(test_failures):
if row[0] != test_class:
_lowerCAmelCase = row[0]
else:
_lowerCAmelCase = ""
_lowerCAmelCase = {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f'Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```',
},
}
client.chat_postMessage(
channel="#accelerate-ci-daily",
thread_ts=ts,
blocks=[payload],
)
| 10 | import unittest
from transformers import DebertaVaTokenizer, DebertaVaTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
_lowerCAmelCase = get_tests_dir("fixtures/spiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( __lowercase, unittest.TestCase ):
UpperCAmelCase = DebertaVaTokenizer
UpperCAmelCase = DebertaVaTokenizerFast
UpperCAmelCase = True
UpperCAmelCase = True
def UpperCamelCase_ ( self : List[Any] ):
super().setUp()
# We have a SentencePiece fixture for testing
_UpperCamelCase = DebertaVaTokenizer(_A , unk_token='''<unk>''' )
tokenizer.save_pretrained(self.tmpdirname )
def UpperCamelCase_ ( self : Dict , _A : Union[str, Any] ):
_UpperCamelCase = '''this is a test'''
_UpperCamelCase = '''this is a test'''
return input_text, output_text
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = '''<pad>'''
_UpperCamelCase = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '''<pad>''' )
self.assertEqual(vocab_keys[1] , '''<unk>''' )
self.assertEqual(vocab_keys[-1] , '''[PAD]''' )
self.assertEqual(len(_A ) , 3_0001 )
def UpperCamelCase_ ( self : List[Any] ):
self.assertEqual(self.get_tokenizer().vocab_size , 3_0000 )
def UpperCamelCase_ ( self : List[str] ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁hello''', '''!''', '''how''', '''▁are''', '''▁you''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Dict ):
pass
@unittest.skip('''There is an inconsistency between slow and fast tokenizer due to a bug in the fast one.''' )
def UpperCamelCase_ ( self : Optional[Any] ):
pass
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[Any] ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁i''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : int ):
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', '''▁''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''▁''', '''.''', ]
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Tuple ):
# fmt: off
_UpperCamelCase = ''' \tHeLLo!how \n Are yoU? '''
_UpperCamelCase = ['''▁''', '''<unk>''', '''e''', '''<unk>''', '''o''', '''!''', '''how''', '''▁''', '''<unk>''', '''re''', '''▁yo''', '''<unk>''', '''?''']
# fmt: on
_UpperCamelCase = DebertaVaTokenizer(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , do_lower_case=_A , split_by_punct=_A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = tokenizer.convert_ids_to_tokens(tokenizer.encode(_A , add_special_tokens=_A ) )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(rust_tokenizer.encode(_A , add_special_tokens=_A ) )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = self.get_rust_tokenizer()
_UpperCamelCase = tokenizer.encode(_A )
_UpperCamelCase = rust_tokenizer.encode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = '''This is a test'''
_UpperCamelCase = [13, 1, 4398, 25, 21, 1289]
_UpperCamelCase = ['''▁''', '''T''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = ['''▁''', '''<unk>''', '''his''', '''▁is''', '''▁a''', '''▁test''']
_UpperCamelCase = DebertaVaTokenizer(_A , keep_accents=_A )
_UpperCamelCase = DebertaVaTokenizerFast(_A , keep_accents=_A )
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
# fmt: off
_UpperCamelCase = '''I was born in 92000, and this is falsé.'''
_UpperCamelCase = [13, 1, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9]
_UpperCamelCase = ['''▁''', '''I''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''é''', '''.''', ]
_UpperCamelCase = ['''▁''', '''<unk>''', '''▁was''', '''▁born''', '''▁in''', '''▁9''', '''2000''', ''',''', '''▁and''', '''▁this''', '''▁is''', '''▁fal''', '''s''', '''<unk>''', '''.''', ]
# fmt: on
_UpperCamelCase = tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.tokenize(_A )
self.assertListEqual(_A , _A )
_UpperCamelCase = rust_tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = DebertaVaTokenizer(_A )
_UpperCamelCase = tokenizer.encode('''sequence builders''' )
_UpperCamelCase = tokenizer.encode('''multi-sequence build''' )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A )
_UpperCamelCase = tokenizer.build_inputs_with_special_tokens(_A , _A )
self.assertEqual([tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] , _A )
self.assertEqual(
[tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_a + [tokenizer.sep_token_id] , _A , )
@slow
def UpperCamelCase_ ( self : Optional[Any] ):
# fmt: off
_UpperCamelCase = {'''input_ids''': [[1, 3_9867, 36, 1_9390, 486, 27, 3_5052, 8_1436, 18, 6_0685, 1225, 7, 3_5052, 8_1436, 18, 9367, 1_6899, 18, 1_5937, 53, 594, 773, 18, 1_6287, 3_0465, 36, 1_5937, 6, 4_1139, 38, 3_6979, 6_0763, 191, 6, 3_4132, 99, 6, 5_0538, 390, 4_3230, 6, 3_4132, 2779, 2_0850, 14, 699, 1072, 1194, 36, 382, 1_0901, 53, 7, 699, 1072, 2084, 36, 2_0422, 630, 53, 19, 105, 3049, 1896, 1053, 1_6899, 1506, 11, 3_7978, 4243, 7, 1237, 3_1869, 200, 1_6566, 654, 6, 3_5052, 8_1436, 7, 5_5630, 1_3593, 4, 2], [1, 26, 1_5011, 13, 667, 8, 1053, 18, 2_3611, 1237, 7_2356, 1_2820, 34, 10_4134, 1209, 35, 1_3313, 6627, 21, 202, 347, 7, 164, 2399, 11, 46, 4485, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 5, 1232, 2864, 1_5785, 1_4951, 105, 5, 8581, 1250, 4, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''token_type_ids''': [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], '''attention_mask''': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='''microsoft/deberta-v2-xlarge''' , revision='''ad6e42c1532ddf3a15c39246b63f5559d558b670''' , )
| 10 | 1 |
"""simple docstring"""
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_lowercase : List[str] = 'bert-base-cased'
_lowercase : Optional[int] = 'google/pegasus-xsum'
_lowercase : List[Any] = [' Sam ate lunch today.', 'Sams lunch ingredients.']
_lowercase : str = ['A very interesting story about what I ate for lunch.', 'Avocado, celery, turkey, coffee']
_lowercase : Tuple = 'patrickvonplaten/t5-tiny-random'
_lowercase : List[str] = 'sshleifer/bart-tiny-random'
_lowercase : Tuple = 'sshleifer/tiny-mbart'
_lowercase : List[Any] = 'sshleifer/tiny-marian-en-de'
def lowercase__ ( snake_case_ :Path , snake_case_ :list ):
__UpperCAmelCase = '''\n'''.join(snake_case_ )
Path(snake_case_ ).open('''w''' ).writelines(snake_case_ )
def lowercase__ ( snake_case_ :Dict ):
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(snake_case_ , F'''{split}.source''' ) , snake_case_ )
_dump_articles(os.path.join(snake_case_ , F'''{split}.target''' ) , snake_case_ )
return tmp_dir
class _UpperCAmelCase ( _lowerCAmelCase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def a ( self : Tuple , _lowercase : Optional[int] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained(_lowercase )
__UpperCAmelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__UpperCAmelCase = max(len(tokenizer.encode(_lowercase ) ) for a in ARTICLES )
__UpperCAmelCase = max(len(tokenizer.encode(_lowercase ) ) for a in SUMMARIES )
__UpperCAmelCase = 4
__UpperCAmelCase = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
__UpperCAmelCase , __UpperCAmelCase = '''ro_RO''', '''de_DE''' # ignored for all but mbart, but never causes error.
__UpperCAmelCase = SeqaSeqDataset(
_lowercase , data_dir=_lowercase , type_path='''train''' , max_source_length=_lowercase , max_target_length=_lowercase , src_lang=_lowercase , tgt_lang=_lowercase , )
__UpperCAmelCase = DataLoader(_lowercase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(_lowercase , _lowercase )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
__UpperCAmelCase = shift_tokens_right(batch['''labels'''] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def a ( self : List[Any] , _lowercase : Dict ):
__UpperCAmelCase = AutoTokenizer.from_pretrained(_lowercase )
__UpperCAmelCase = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
__UpperCAmelCase = max(len(tokenizer.encode(_lowercase ) ) for a in ARTICLES )
__UpperCAmelCase = max(len(tokenizer.encode(_lowercase ) ) for a in SUMMARIES )
__UpperCAmelCase = 4
__UpperCAmelCase = LegacySeqaSeqDataset(
_lowercase , data_dir=_lowercase , type_path='''train''' , max_source_length=20 , max_target_length=_lowercase , )
__UpperCAmelCase = DataLoader(_lowercase , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def a ( self : str ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''facebook/mbart-large-cc25''' )
__UpperCAmelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
__UpperCAmelCase = tmp_dir.joinpath('''train.source''' ).open().readlines()
__UpperCAmelCase = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(_lowercase , _lowercase , 1_28 , _lowercase )
__UpperCAmelCase = {x.name for x in tmp_dir.iterdir()}
__UpperCAmelCase = {x.name for x in save_dir.iterdir()}
__UpperCAmelCase = save_dir.joinpath('''train.source''' ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(_lowercase ) < len(_lowercase )
assert len(_lowercase ) == 1
assert len(packed_examples[0] ) == sum(len(_lowercase ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason='''This test requires fairseq''' )
def a ( self : Tuple ):
if not FAIRSEQ_AVAILABLE:
return
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_dataset(max_len=64 )
__UpperCAmelCase = 64
__UpperCAmelCase = ds.make_dynamic_sampler(_lowercase , required_batch_size_multiple=_lowercase )
__UpperCAmelCase = [len(_lowercase ) for x in batch_sampler]
assert len(set(_lowercase ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(_lowercase ) == len(_lowercase ) # no dropped or added examples
__UpperCAmelCase = DataLoader(_lowercase , batch_sampler=_lowercase , collate_fn=ds.collate_fn , num_workers=2 )
__UpperCAmelCase = []
__UpperCAmelCase = []
for batch in data_loader:
__UpperCAmelCase = batch['''input_ids'''].shape
__UpperCAmelCase = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
__UpperCAmelCase = np.product(batch['''input_ids'''].shape )
num_src_per_batch.append(_lowercase )
if num_src_tokens > (max_tokens * 1.1):
failures.append(_lowercase )
assert num_src_per_batch[0] == max(_lowercase )
if failures:
raise AssertionError(F'''too many tokens in {len(_lowercase )} batches''' )
def a ( self : Optional[int] ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_dataset(max_len=5_12 )
__UpperCAmelCase = 2
__UpperCAmelCase = ds.make_sortish_sampler(_lowercase , shuffle=_lowercase )
__UpperCAmelCase = DataLoader(_lowercase , batch_size=_lowercase , collate_fn=ds.collate_fn , num_workers=2 )
__UpperCAmelCase = DataLoader(_lowercase , batch_size=_lowercase , collate_fn=ds.collate_fn , num_workers=2 , sampler=_lowercase )
__UpperCAmelCase = tokenizer.pad_token_id
def count_pad_tokens(_lowercase : Union[str, Any] , _lowercase : List[Any]="input_ids" ):
return [batch[k].eq(_lowercase ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(_lowercase , k='''labels''' ) ) < sum(count_pad_tokens(_lowercase , k='''labels''' ) )
assert sum(count_pad_tokens(_lowercase ) ) < sum(count_pad_tokens(_lowercase ) )
assert len(_lowercase ) == len(_lowercase )
def a ( self : Tuple , _lowercase : str=10_00 , _lowercase : Union[str, Any]=1_28 ):
if os.getenv('''USE_REAL_DATA''' , _lowercase ):
__UpperCAmelCase = '''examples/seq2seq/wmt_en_ro'''
__UpperCAmelCase = max_len * 2 * 64
if not Path(_lowercase ).joinpath('''train.len''' ).exists():
save_len_file(_lowercase , _lowercase )
else:
__UpperCAmelCase = '''examples/seq2seq/test_data/wmt_en_ro'''
__UpperCAmelCase = max_len * 4
save_len_file(_lowercase , _lowercase )
__UpperCAmelCase = AutoTokenizer.from_pretrained(_lowercase )
__UpperCAmelCase = SeqaSeqDataset(
_lowercase , data_dir=_lowercase , type_path='''train''' , max_source_length=_lowercase , max_target_length=_lowercase , n_obs=_lowercase , )
return ds, max_tokens, tokenizer
def a ( self : Optional[Any] ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = self._get_dataset()
__UpperCAmelCase = set(DistributedSortishSampler(_lowercase , 2_56 , num_replicas=2 , rank=0 , add_extra_examples=_lowercase ) )
__UpperCAmelCase = set(DistributedSortishSampler(_lowercase , 2_56 , num_replicas=2 , rank=1 , add_extra_examples=_lowercase ) )
assert idsa.intersection(_lowercase ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def a ( self : Optional[int] , _lowercase : Any ):
__UpperCAmelCase = AutoTokenizer.from_pretrained(_lowercase , use_fast=_lowercase )
if tok_name == MBART_TINY:
__UpperCAmelCase = SeqaSeqDataset(
_lowercase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , src_lang='''EN''' , tgt_lang='''FR''' , )
__UpperCAmelCase = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
__UpperCAmelCase = SeqaSeqDataset(
_lowercase , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path='''train''' , max_source_length=4 , max_target_length=8 , )
__UpperCAmelCase = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(_lowercase ) == 1 if tok_name == BART_TINY else len(_lowercase ) == 0
| 710 |
"""simple docstring"""
from __future__ import annotations
def lowercase__ ( snake_case_ :list[int] ):
__UpperCAmelCase = len(snake_case_ ) // 2
# choose the middle 3 elements
__UpperCAmelCase = lst[m - 1 : m + 2]
# if middle element is peak
if three[1] > three[0] and three[1] > three[2]:
return three[1]
# if increasing, recurse on right
elif three[0] < three[2]:
if len(lst[:m] ) == 2:
m -= 1
return peak(lst[m:] )
# decreasing
else:
if len(lst[:m] ) == 2:
m += 1
return peak(lst[:m] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 397 | 0 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
SCREAMING_SNAKE_CASE_ = """examples/"""
SCREAMING_SNAKE_CASE_ = {
"""examples""": (re.compile(r"""^check_min_version\(\"[^\"]+\"\)\s*$""", re.MULTILINE), """check_min_version(\"VERSION\")\n"""),
"""init""": (re.compile(r"""^__version__\s+=\s+\"([^\"]+)\"\s*$""", re.MULTILINE), """__version__ = \"VERSION\"\n"""),
"""setup""": (re.compile(r"""^(\s*)version\s*=\s*\"[^\"]+\",""", re.MULTILINE), r"""\1version=\"VERSION\","""),
"""doc""": (re.compile(r"""^(\s*)release\s*=\s*\"[^\"]+\"$""", re.MULTILINE), """release = \"VERSION\"\n"""),
}
SCREAMING_SNAKE_CASE_ = {
"""init""": """src/diffusers/__init__.py""",
"""setup""": """setup.py""",
}
SCREAMING_SNAKE_CASE_ = """README.md"""
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) -> Optional[int]:
with open(SCREAMING_SNAKE_CASE__, "r", encoding="utf-8", newline="\n" ) as f:
a_ : Union[str, Any] = f.read()
a_ , a_ : List[str] = REPLACE_PATTERNS[pattern]
a_ : List[Any] = replace.replace("VERSION", SCREAMING_SNAKE_CASE__ )
a_ : Union[str, Any] = re_pattern.sub(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__, "w", encoding="utf-8", newline="\n" ) as f:
f.write(SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> Optional[Any]:
for folder, directories, fnames in os.walk(SCREAMING_SNAKE_CASE__ ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ), SCREAMING_SNAKE_CASE__, pattern="examples" )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__=False ) -> List[Any]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ )
if not patch:
update_version_in_examples(SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase_ ( ) -> Union[str, Any]:
a_ : int = "🤗 Transformers currently provides the following architectures"
a_ : str = "1. Want to contribute a new model?"
with open(SCREAMING_SNAKE_CASE__, "r", encoding="utf-8", newline="\n" ) as f:
a_ : Union[str, Any] = f.readlines()
# Find the start of the list.
a_ : List[str] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
a_ : List[Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
a_ : str = lines[index].replace(
"https://huggingface.co/docs/diffusers/main/model_doc", "https://huggingface.co/docs/diffusers/model_doc", )
index += 1
with open(SCREAMING_SNAKE_CASE__, "w", encoding="utf-8", newline="\n" ) as f:
f.writelines(SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase_ ( ) -> Optional[Any]:
with open(REPLACE_FILES["init"], "r" ) as f:
a_ : Dict = f.read()
a_ : Union[str, Any] = REPLACE_PATTERNS["init"][0].search(SCREAMING_SNAKE_CASE__ ).groups()[0]
return packaging.version.parse(SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__=False ) -> int:
a_ : Optional[Any] = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
a_ : Optional[int] = default_version.base_version
elif patch:
a_ : List[Any] = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
a_ : str = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
a_ : Any = input(F"""Which version are you releasing? [{default_version}]""" )
if len(SCREAMING_SNAKE_CASE__ ) == 0:
a_ : Optional[Any] = default_version
print(F"""Updating version to {version}.""" )
global_version_update(SCREAMING_SNAKE_CASE__, patch=SCREAMING_SNAKE_CASE__ )
def lowerCAmelCase_ ( ) -> List[str]:
a_ : Optional[Any] = get_version()
a_ : Optional[int] = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
a_ : Dict = current_version.base_version
# Check with the user we got that right.
a_ : Any = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(SCREAMING_SNAKE_CASE__ ) == 0:
a_ : int = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(SCREAMING_SNAKE_CASE__ )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = argparse.ArgumentParser()
parser.add_argument("""--post_release""", action="""store_true""", help="""Whether this is pre or post release.""")
parser.add_argument("""--patch""", action="""store_true""", help="""Whether or not this is a patch release.""")
SCREAMING_SNAKE_CASE_ = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("""Nothing to do after a patch :-)""")
else:
post_release_work() | 237 |
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = """ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"""
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> bytes:
# Make sure the supplied data is a bytes-like object
if not isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
a_ : Union[str, Any] = F"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(SCREAMING_SNAKE_CASE__ )
a_ : Optional[Any] = "".join(bin(SCREAMING_SNAKE_CASE__ )[2:].zfill(8 ) for byte in data )
a_ : Tuple = len(SCREAMING_SNAKE_CASE__ ) % 6 != 0
if padding_needed:
# The padding that will be added later
a_ : List[Any] = B"=" * ((6 - len(SCREAMING_SNAKE_CASE__ ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(SCREAMING_SNAKE_CASE__ ) % 6)
else:
a_ : List[Any] = B""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6], 2 )]
for index in range(0, len(SCREAMING_SNAKE_CASE__ ), 6 ) ).encode()
+ padding
)
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE__ ) -> bytes:
# Make sure encoded_data is either a string or a bytes-like object
if not isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
a_ : int = (
"argument should be a bytes-like object or ASCII string, "
F"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(SCREAMING_SNAKE_CASE__ )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__ ):
try:
a_ : List[Any] = encoded_data.decode("utf-8" )
except UnicodeDecodeError:
raise ValueError("base64 encoded data should only contain ASCII characters" )
a_ : Union[str, Any] = encoded_data.count("=" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(SCREAMING_SNAKE_CASE__ ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
a_ : List[str] = encoded_data[:-padding]
a_ : Optional[int] = "".join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE__ ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
a_ : Optional[int] = "".join(
bin(B64_CHARSET.index(SCREAMING_SNAKE_CASE__ ) )[2:].zfill(6 ) for char in encoded_data )
a_ : Union[str, Any] = [
int(binary_stream[index : index + 8], 2 )
for index in range(0, len(SCREAMING_SNAKE_CASE__ ), 8 )
]
return bytes(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
import doctest
doctest.testmod() | 237 | 1 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
lowercase_ = logging.get_logger(__name__)
class A__ ( __SCREAMING_SNAKE_CASE ):
def __init__( self , *lowerCamelCase , **lowerCamelCase ) -> None:
"""simple docstring"""
warnings.warn(
'''The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
''' Please use SegformerImageProcessor instead.''' , lowerCamelCase , )
super().__init__(*lowerCamelCase , **lowerCamelCase )
| 336 |
from __future__ import annotations
class A__ :
def __init__( self , lowerCamelCase ) -> None:
"""simple docstring"""
__magic_name__ : List[str] = data
__magic_name__ : Node | None = None
__magic_name__ : Node | None = None
def lowerCAmelCase ( UpperCAmelCase ) ->None: # In Order traversal of the tree
"""simple docstring"""
if tree:
display(tree.left )
print(tree.data )
display(tree.right )
def lowerCAmelCase ( UpperCAmelCase ) ->int:
"""simple docstring"""
return 1 + max(depth_of_tree(tree.left ), depth_of_tree(tree.right ) ) if tree else 0
def lowerCAmelCase ( UpperCAmelCase ) ->bool:
"""simple docstring"""
if not tree:
return True
if tree.left and tree.right:
return is_full_binary_tree(tree.left ) and is_full_binary_tree(tree.right )
else:
return not tree.left and not tree.right
def lowerCAmelCase ( ) ->None: # Main function for testing.
"""simple docstring"""
__magic_name__ : Tuple = Node(1 )
__magic_name__ : Union[str, Any] = Node(2 )
__magic_name__ : Tuple = Node(3 )
__magic_name__ : List[str] = Node(4 )
__magic_name__ : str = Node(5 )
__magic_name__ : List[Any] = Node(6 )
__magic_name__ : Optional[int] = Node(7 )
__magic_name__ : str = Node(8 )
__magic_name__ : str = Node(9 )
print(is_full_binary_tree(UpperCAmelCase ) )
print(depth_of_tree(UpperCAmelCase ) )
print('''Tree is: ''' )
display(UpperCAmelCase )
if __name__ == "__main__":
main()
| 336 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__magic_name__ : List[Any] =logging.get_logger(__name__)
__magic_name__ : int ={
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class UpperCamelCase_ ( A ):
"""simple docstring"""
UpperCAmelCase__ : List[str] = '''imagegpt'''
UpperCAmelCase__ : Optional[int] = ['''past_key_values''']
UpperCAmelCase__ : str = {
'''hidden_size''': '''n_embd''',
'''max_position_embeddings''': '''n_positions''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Union[str, Any] , _lowerCamelCase : List[Any]=5_12 + 1 , _lowerCamelCase : List[str]=32 * 32 , _lowerCamelCase : Optional[int]=5_12 , _lowerCamelCase : List[str]=24 , _lowerCamelCase : Optional[int]=8 , _lowerCamelCase : str=None , _lowerCamelCase : str="quick_gelu" , _lowerCamelCase : Union[str, Any]=0.1 , _lowerCamelCase : Union[str, Any]=0.1 , _lowerCamelCase : Any=0.1 , _lowerCamelCase : Tuple=1e-5 , _lowerCamelCase : List[Any]=0.02 , _lowerCamelCase : Tuple=True , _lowerCamelCase : List[str]=True , _lowerCamelCase : List[str]=False , _lowerCamelCase : List[str]=False , _lowerCamelCase : Any=False , **_lowerCamelCase : List[Any] , ) -> Optional[Any]:
__magic_name__ = vocab_size
__magic_name__ = n_positions
__magic_name__ = n_embd
__magic_name__ = n_layer
__magic_name__ = n_head
__magic_name__ = n_inner
__magic_name__ = activation_function
__magic_name__ = resid_pdrop
__magic_name__ = embd_pdrop
__magic_name__ = attn_pdrop
__magic_name__ = layer_norm_epsilon
__magic_name__ = initializer_range
__magic_name__ = scale_attn_weights
__magic_name__ = use_cache
__magic_name__ = scale_attn_by_inverse_layer_idx
__magic_name__ = reorder_and_upcast_attn
__magic_name__ = tie_word_embeddings
super().__init__(tie_word_embeddings=_lowerCamelCase , **_lowerCamelCase )
class UpperCamelCase_ ( A ):
"""simple docstring"""
@property
def __A ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
("input_ids", {0: "batch", 1: "sequence"}),
] )
def __A ( self : Dict , _lowerCamelCase : "FeatureExtractionMixin" , _lowerCamelCase : int = 1 , _lowerCamelCase : int = -1 , _lowerCamelCase : bool = False , _lowerCamelCase : Optional["TensorType"] = None , _lowerCamelCase : int = 3 , _lowerCamelCase : int = 32 , _lowerCamelCase : int = 32 , ) -> Mapping[str, Any]:
__magic_name__ = self._generate_dummy_images(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
__magic_name__ = dict(preprocessor(images=_lowerCamelCase , return_tensors=_lowerCamelCase ) )
return inputs
| 664 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
__magic_name__ : Optional[int] =logging.get_logger(__name__)
__magic_name__ : Tuple ='The Nymphenburg Palace is a beautiful palace in Munich!'
def __snake_case ( lowerCamelCase_ : str , lowerCamelCase_ : str ):
'''simple docstring'''
__magic_name__ = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
__magic_name__ = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__magic_name__ = BERTEncoder(
attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=lowerCamelCase_ , output_all_encodings=lowerCamelCase_ , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , lowerCamelCase_ ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__magic_name__ = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
__magic_name__ = os.path.join(get_home_dir() , "models" )
__magic_name__ = _load_vocab(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , cls=lowerCamelCase_ )
__magic_name__ = nlp.model.BERTModel(
lowerCamelCase_ , len(lowerCamelCase_ ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=lowerCamelCase_ , use_token_type_embed=lowerCamelCase_ , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=lowerCamelCase_ , use_decoder=lowerCamelCase_ , )
original_bort.load_parameters(lowerCamelCase_ , cast_dtype=lowerCamelCase_ , ignore_extra=lowerCamelCase_ )
__magic_name__ = original_bort._collect_params_with_prefix()
# Build our config 🤗
__magic_name__ = {
"architectures": ["BertForMaskedLM"],
"attention_probs_dropout_prob": predefined_args["dropout"],
"hidden_act": "gelu",
"hidden_dropout_prob": predefined_args["dropout"],
"hidden_size": predefined_args["embed_size"],
"initializer_range": 0.02,
"intermediate_size": predefined_args["hidden_size"],
"layer_norm_eps": predefined_args["layer_norm_eps"],
"max_position_embeddings": predefined_args["max_length"],
"model_type": "bort",
"num_attention_heads": predefined_args["num_heads"],
"num_hidden_layers": predefined_args["num_layers"],
"pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
"type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
"vocab_size": len(lowerCamelCase_ ),
}
__magic_name__ = BertConfig.from_dict(lowerCamelCase_ )
__magic_name__ = BertForMaskedLM(lowerCamelCase_ )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(lowerCamelCase_ : Any ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
def check_and_map_params(lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int ):
__magic_name__ = hf_param.shape
__magic_name__ = to_torch(params[gluon_param] )
__magic_name__ = gluon_param.shape
assert (
shape_hf == shape_gluon
), F'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
return gluon_param
__magic_name__ = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
__magic_name__ = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
__magic_name__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
__magic_name__ = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__magic_name__ = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__magic_name__ = hf_bort_model.bert.encoder.layer[i]
# self attention
__magic_name__ = layer.attention.self
__magic_name__ = check_and_map_params(
self_attn.key.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
__magic_name__ = check_and_map_params(
self_attn.key.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
__magic_name__ = check_and_map_params(
self_attn.query.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
__magic_name__ = check_and_map_params(
self_attn.query.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
__magic_name__ = check_and_map_params(
self_attn.value.bias.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
__magic_name__ = check_and_map_params(
self_attn.value.weight.data , F'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
__magic_name__ = layer.attention.output
__magic_name__ = check_and_map_params(
self_output.dense.bias , F'encoder.transformer_cells.{i}.proj.bias' )
__magic_name__ = check_and_map_params(
self_output.dense.weight , F'encoder.transformer_cells.{i}.proj.weight' )
__magic_name__ = check_and_map_params(
self_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.layer_norm.beta' )
__magic_name__ = check_and_map_params(
self_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
__magic_name__ = layer.intermediate
__magic_name__ = check_and_map_params(
intermediate.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
__magic_name__ = check_and_map_params(
intermediate.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
__magic_name__ = layer.output
__magic_name__ = check_and_map_params(
bert_output.dense.bias , F'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
__magic_name__ = check_and_map_params(
bert_output.dense.weight , F'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
__magic_name__ = check_and_map_params(
bert_output.LayerNorm.bias , F'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
__magic_name__ = check_and_map_params(
bert_output.LayerNorm.weight , F'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__magic_name__ = RobertaTokenizer.from_pretrained("roberta-base" )
__magic_name__ = tokenizer.encode_plus(lowerCamelCase_ )["input_ids"]
# Get gluon output
__magic_name__ = mx.nd.array([input_ids] )
__magic_name__ = original_bort(inputs=lowerCamelCase_ , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(lowerCamelCase_ )
__magic_name__ = BertModel.from_pretrained(lowerCamelCase_ )
hf_bort_model.eval()
__magic_name__ = tokenizer.encode_plus(lowerCamelCase_ , return_tensors="pt" )
__magic_name__ = hf_bort_model(**lowerCamelCase_ )[0]
__magic_name__ = output_gluon[0].asnumpy()
__magic_name__ = output_hf[0].detach().numpy()
__magic_name__ = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__magic_name__ = np.allclose(lowerCamelCase_ , lowerCamelCase_ , atol=1e-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
print("Absolute difference is:" , lowerCamelCase_ )
if __name__ == "__main__":
__magic_name__ : int =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
__magic_name__ : Optional[Any] =parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 664 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import LayoutLMConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.layoutlm.modeling_tf_layoutlm import (
TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLayoutLMForMaskedLM,
TFLayoutLMForQuestionAnswering,
TFLayoutLMForSequenceClassification,
TFLayoutLMForTokenClassification,
TFLayoutLMModel,
)
class lowerCamelCase :
"""simple docstring"""
def __init__( self : List[str], _UpperCAmelCase : str, _UpperCAmelCase : Tuple=1_3, _UpperCAmelCase : Any=7, _UpperCAmelCase : int=True, _UpperCAmelCase : Union[str, Any]=True, _UpperCAmelCase : List[str]=True, _UpperCAmelCase : List[Any]=True, _UpperCAmelCase : Union[str, Any]=9_9, _UpperCAmelCase : Dict=3_2, _UpperCAmelCase : Optional[Any]=2, _UpperCAmelCase : int=4, _UpperCAmelCase : int=3_7, _UpperCAmelCase : Optional[int]="gelu", _UpperCAmelCase : Dict=0.1, _UpperCAmelCase : Any=0.1, _UpperCAmelCase : Tuple=5_1_2, _UpperCAmelCase : List[str]=1_6, _UpperCAmelCase : List[Any]=2, _UpperCAmelCase : int=0.02, _UpperCAmelCase : Dict=3, _UpperCAmelCase : Optional[Any]=4, _UpperCAmelCase : Tuple=None, _UpperCAmelCase : List[Any]=1_0_0_0, ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = parent
SCREAMING_SNAKE_CASE__ : List[str] = batch_size
SCREAMING_SNAKE_CASE__ : Tuple = seq_length
SCREAMING_SNAKE_CASE__ : Any = is_training
SCREAMING_SNAKE_CASE__ : Dict = use_input_mask
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_token_type_ids
SCREAMING_SNAKE_CASE__ : int = use_labels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_size
SCREAMING_SNAKE_CASE__ : List[str] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_act
SCREAMING_SNAKE_CASE__ : Tuple = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE__ : Any = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_labels
SCREAMING_SNAKE_CASE__ : Tuple = num_choices
SCREAMING_SNAKE_CASE__ : int = scope
SCREAMING_SNAKE_CASE__ : Any = range_bbox
def A_ ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
# convert bbox to numpy since TF does not support item assignment
SCREAMING_SNAKE_CASE__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length, 4], self.range_bbox ).numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
SCREAMING_SNAKE_CASE__ : List[str] = bbox[i, j, 3]
SCREAMING_SNAKE_CASE__ : Optional[Any] = bbox[i, j, 1]
SCREAMING_SNAKE_CASE__ : Dict = t
if bbox[i, j, 2] < bbox[i, j, 0]:
SCREAMING_SNAKE_CASE__ : str = bbox[i, j, 2]
SCREAMING_SNAKE_CASE__ : str = bbox[i, j, 0]
SCREAMING_SNAKE_CASE__ : Tuple = t
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.convert_to_tensor(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] = None
if self.use_input_mask:
SCREAMING_SNAKE_CASE__ : Any = random_attention_mask([self.batch_size, self.seq_length] )
SCREAMING_SNAKE_CASE__ : List[str] = None
if self.use_token_type_ids:
SCREAMING_SNAKE_CASE__ : Dict = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size )
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
SCREAMING_SNAKE_CASE__ : str = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size], self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length], self.num_labels )
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size], self.num_choices )
SCREAMING_SNAKE_CASE__ : Any = LayoutLMConfig(
vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, )
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def A_ ( self : str, _UpperCAmelCase : str, _UpperCAmelCase : Any, _UpperCAmelCase : List[str], _UpperCAmelCase : List[Any], _UpperCAmelCase : Tuple, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : List[Any], _UpperCAmelCase : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = TFLayoutLMModel(config=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = model(_UpperCAmelCase, _UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = model(_UpperCAmelCase, _UpperCAmelCase, token_type_ids=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase, _UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size) )
def A_ ( self : Any, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : List[Any], _UpperCAmelCase : str, _UpperCAmelCase : Dict, _UpperCAmelCase : List[str], _UpperCAmelCase : Tuple, _UpperCAmelCase : Optional[int], _UpperCAmelCase : List[Any] ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = TFLayoutLMForMaskedLM(config=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = model(_UpperCAmelCase, _UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size) )
def A_ ( self : Tuple, _UpperCAmelCase : List[str], _UpperCAmelCase : Any, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[int], _UpperCAmelCase : Optional[int], _UpperCAmelCase : int, _UpperCAmelCase : int, _UpperCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.num_labels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFLayoutLMForSequenceClassification(config=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple = model(_UpperCAmelCase, _UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels) )
def A_ ( self : Dict, _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : int, _UpperCAmelCase : Any, _UpperCAmelCase : Tuple, _UpperCAmelCase : int, _UpperCAmelCase : List[Any], _UpperCAmelCase : Any ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.num_labels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TFLayoutLMForTokenClassification(config=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = model(_UpperCAmelCase, _UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels) )
def A_ ( self : Any, _UpperCAmelCase : Optional[Any], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : str, _UpperCAmelCase : Tuple, _UpperCAmelCase : List[str], _UpperCAmelCase : Union[str, Any], _UpperCAmelCase : Optional[int], _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = TFLayoutLMForQuestionAnswering(config=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict = model(_UpperCAmelCase, _UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase )
self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length) )
def A_ ( self : Tuple ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = self.prepare_config_and_inputs()
(
(
SCREAMING_SNAKE_CASE__
) ,(
SCREAMING_SNAKE_CASE__
) ,(
SCREAMING_SNAKE_CASE__
) ,(
SCREAMING_SNAKE_CASE__
) ,(
SCREAMING_SNAKE_CASE__
) ,(
SCREAMING_SNAKE_CASE__
) ,(
SCREAMING_SNAKE_CASE__
) ,(
SCREAMING_SNAKE_CASE__
) ,
) : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE__ : Dict = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase (__lowerCamelCase , __lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = (
(
TFLayoutLMModel,
TFLayoutLMForMaskedLM,
TFLayoutLMForTokenClassification,
TFLayoutLMForSequenceClassification,
TFLayoutLMForQuestionAnswering,
)
if is_tf_available()
else ()
)
UpperCAmelCase_ = (
{
"feature-extraction": TFLayoutLMModel,
"fill-mask": TFLayoutLMForMaskedLM,
"text-classification": TFLayoutLMForSequenceClassification,
"token-classification": TFLayoutLMForTokenClassification,
"zero-shot": TFLayoutLMForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase_ = False
UpperCAmelCase_ = True
UpperCAmelCase_ = 10
def A_ ( self : int ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = TFLayoutLMModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[int] = ConfigTester(self, config_class=_UpperCAmelCase, hidden_size=3_7 )
def A_ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
self.config_tester.run_common_tests()
def A_ ( self : Dict ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def A_ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def A_ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_UpperCAmelCase )
def A_ ( self : Optional[int] ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
def A_ ( self : Any ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_UpperCAmelCase )
@slow
def A_ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
for model_name in TF_LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Tuple = TFLayoutLMModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skip("Onnx compliancy broke with TF 2.10" )
def A_ ( self : List[Any] ) -> int:
"""simple docstring"""
pass
def _a ( ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = tf.convert_to_tensor([[1_01,10_19,10_14,10_16,10_37,1_28_49,47_47,10_04,1_42_46,22_78,54_39,45_24,50_02,29_30,21_93,29_30,43_41,32_08,10_05,10_55,21_71,28_48,1_13_00,35_31,1_02],[1_01,40_70,40_34,70_20,10_24,30_58,10_15,10_13,28_61,10_13,60_70,1_92_74,27_72,62_05,2_78_14,1_61_47,1_61_47,43_43,20_47,1_02_83,1_09_69,1_43_89,10_12,23_38,1_02]] ) # noqa: E231
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.convert_to_tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],] ) # noqa: E231
SCREAMING_SNAKE_CASE__ : Dict = tf.convert_to_tensor([[[0,0,0,0],[4_23,2_37,4_40,2_51],[4_27,2_72,4_41,2_87],[4_19,1_15,4_37,1_29],[9_61,8_85,9_92,9_12],[2_56,38,3_30,58],[2_56,38,3_30,58],[3_36,42,3_53,57],[3_60,39,4_01,56],[3_60,39,4_01,56],[4_11,39,4_71,59],[4_79,41,5_28,59],[5_33,39,6_30,60],[67,1_13,1_34,1_31],[1_41,1_15,2_09,1_32],[68,1_49,1_33,1_66],[1_41,1_49,1_87,1_64],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[1_95,1_48,2_87,1_65],[2_95,1_48,3_49,1_65],[4_41,1_49,4_92,1_66],[4_97,1_49,5_46,1_64],[64,2_01,1_25,2_18],[10_00,10_00,10_00,10_00]],[[0,0,0,0],[6_62,1_50,7_54,1_66],[6_65,1_99,7_42,2_11],[5_19,2_13,5_54,2_28],[5_19,2_13,5_54,2_28],[1_34,4_33,1_87,4_54],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[1_30,4_67,2_04,4_80],[3_14,4_69,3_76,4_82],[5_04,6_84,5_82,7_06],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[9_41,8_25,9_73,9_00],[6_10,7_49,6_52,7_65],[1_30,6_59,1_68,6_72],[1_76,6_57,2_37,6_72],[2_38,6_57,3_12,6_72],[4_43,6_53,6_28,6_72],[4_43,6_53,6_28,6_72],[7_16,3_01,8_25,3_17],[10_00,10_00,10_00,10_00]]] ) # noqa: E231
SCREAMING_SNAKE_CASE__ : List[str] = tf.convert_to_tensor([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]] ) # noqa: E231
# these are sequence labels (i.e. at the token level)
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.convert_to_tensor([[-1_00,10,10,10,9,1,-1_00,7,7,-1_00,7,7,4,2,5,2,8,8,-1_00,-1_00,5,0,3,2,-1_00],[-1_00,12,12,12,-1_00,12,10,-1_00,-1_00,-1_00,-1_00,10,12,9,-1_00,-1_00,-1_00,10,10,10,9,12,-1_00,10,-1_00]] ) # noqa: E231
# fmt: on
return input_ids, attention_mask, bbox, token_type_ids, labels
@require_tf
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@slow
def A_ ( self : Optional[int] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = TFLayoutLMModel.from_pretrained("microsoft/layoutlm-base-uncased" )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : List[Any] = prepare_layoutlm_batch_inputs()
# forward pass
SCREAMING_SNAKE_CASE__ : int = model(input_ids=_UpperCAmelCase, bbox=_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase )
# test the sequence output on [0, :3, :3]
SCREAMING_SNAKE_CASE__ : List[Any] = tf.convert_to_tensor(
[[0.1785, -0.1947, -0.0425], [-0.3254, -0.2807, 0.2553], [-0.5391, -0.3322, 0.3364]], )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3], _UpperCAmelCase, atol=1E-3 ) )
# test the pooled output on [1, :3]
SCREAMING_SNAKE_CASE__ : List[Any] = tf.convert_to_tensor([-0.6580, -0.0214, 0.8552] )
self.assertTrue(np.allclose(outputs.pooler_output[1, :3], _UpperCAmelCase, atol=1E-3 ) )
@slow
def A_ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
# initialize model with randomly initialized sequence classification head
SCREAMING_SNAKE_CASE__ : List[Any] = TFLayoutLMForSequenceClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=2 )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : int = prepare_layoutlm_batch_inputs()
# forward pass
SCREAMING_SNAKE_CASE__ : str = model(
input_ids=_UpperCAmelCase, bbox=_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, labels=tf.convert_to_tensor([1, 1] ), )
# test whether we get a loss as a scalar
SCREAMING_SNAKE_CASE__ : str = outputs.loss
SCREAMING_SNAKE_CASE__ : Any = (2,)
self.assertEqual(loss.shape, _UpperCAmelCase )
# test the shape of the logits
SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.logits
SCREAMING_SNAKE_CASE__ : Tuple = (2, 2)
self.assertEqual(logits.shape, _UpperCAmelCase )
@slow
def A_ ( self : int ) -> List[Any]:
"""simple docstring"""
# initialize model with randomly initialized token classification head
SCREAMING_SNAKE_CASE__ : Dict = TFLayoutLMForTokenClassification.from_pretrained("microsoft/layoutlm-base-uncased", num_labels=1_3 )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Any = prepare_layoutlm_batch_inputs()
# forward pass
SCREAMING_SNAKE_CASE__ : Optional[int] = model(
input_ids=_UpperCAmelCase, bbox=_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase, labels=_UpperCAmelCase )
# test the shape of the logits
SCREAMING_SNAKE_CASE__ : Tuple = outputs.logits
SCREAMING_SNAKE_CASE__ : Optional[Any] = tf.convert_to_tensor((2, 2_5, 1_3) )
self.assertEqual(logits.shape, _UpperCAmelCase )
@slow
def A_ ( self : Union[str, Any] ) -> List[str]:
"""simple docstring"""
# initialize model with randomly initialized token classification head
SCREAMING_SNAKE_CASE__ : Tuple = TFLayoutLMForQuestionAnswering.from_pretrained("microsoft/layoutlm-base-uncased" )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Dict = prepare_layoutlm_batch_inputs()
# forward pass
SCREAMING_SNAKE_CASE__ : Dict = model(input_ids=_UpperCAmelCase, bbox=_UpperCAmelCase, attention_mask=_UpperCAmelCase, token_type_ids=_UpperCAmelCase )
# test the shape of the logits
SCREAMING_SNAKE_CASE__ : List[str] = tf.convert_to_tensor((2, 2_5) )
self.assertEqual(outputs.start_logits.shape, _UpperCAmelCase )
self.assertEqual(outputs.end_logits.shape, _UpperCAmelCase )
| 157 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
WavaVecaFeatureExtractor,
WavaVecaPhonemeCTCTokenizer,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Optional[int] = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
_lowerCamelCase : Optional[int] = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def _a ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Dict ) -> str:
'''simple docstring'''
for attribute in key.split("." ):
if is_finetuned:
if attribute in ["quantizer", "project_q", "project_hid"]:
# those layers are only relevant for pretraining and should be dropped
return
if attribute == "ctc_proj":
# we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "lm_head"
SCREAMING_SNAKE_CASE__ : List[str] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if weight_type is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = getattr(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ).shape
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
SCREAMING_SNAKE_CASE__ : List[Any] = value
elif weight_type == "weight_g":
SCREAMING_SNAKE_CASE__ : str = value
elif weight_type == "weight_v":
SCREAMING_SNAKE_CASE__ : str = value
elif weight_type == "bias":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value
else:
SCREAMING_SNAKE_CASE__ : str = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _a ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : List[str] ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
SCREAMING_SNAKE_CASE__ : Optional[int] = fairseq_model.state_dict()
SCREAMING_SNAKE_CASE__ : str = hf_model.unispeech.feature_extractor
for name, value in fairseq_dict.items():
SCREAMING_SNAKE_CASE__ : Tuple = False
if "conv_layers" in name:
load_conv_layer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , hf_model.config.feat_extract_norm == "group" , )
SCREAMING_SNAKE_CASE__ : List[str] = True
else:
for key, mapped_key in MAPPING.items():
SCREAMING_SNAKE_CASE__ : Dict = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
SCREAMING_SNAKE_CASE__ : str = True
if "*" in mapped_key:
SCREAMING_SNAKE_CASE__ : str = name.split(SCREAMING_SNAKE_CASE__ )[0].split("." )[-2]
SCREAMING_SNAKE_CASE__ : List[str] = mapped_key.replace("*" , SCREAMING_SNAKE_CASE__ )
if "weight_g" in name:
SCREAMING_SNAKE_CASE__ : Optional[int] = "weight_g"
elif "weight_v" in name:
SCREAMING_SNAKE_CASE__ : Optional[int] = "weight_v"
elif "bias" in name:
SCREAMING_SNAKE_CASE__ : Dict = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
SCREAMING_SNAKE_CASE__ : str = "weight"
else:
SCREAMING_SNAKE_CASE__ : List[Any] = None
set_recursively(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
continue
if not is_used:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _a ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str] ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = full_name.split("conv_layers." )[-1]
SCREAMING_SNAKE_CASE__ : Any = name.split("." )
SCREAMING_SNAKE_CASE__ : List[str] = int(items[0] )
SCREAMING_SNAKE_CASE__ : Optional[int] = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE__ : Dict = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f'''{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'''
" found."
)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'''
)
SCREAMING_SNAKE_CASE__ : int = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(SCREAMING_SNAKE_CASE__ )
@torch.no_grad()
def _a ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : str=None , SCREAMING_SNAKE_CASE__ : Tuple=None , SCREAMING_SNAKE_CASE__ : int=True ) -> List[str]:
'''simple docstring'''
if config_path is not None:
SCREAMING_SNAKE_CASE__ : str = UniSpeechConfig.from_pretrained(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : str = UniSpeechConfig()
if is_finetuned:
if dict_path:
SCREAMING_SNAKE_CASE__ : List[Any] = Dictionary.load_from_json(SCREAMING_SNAKE_CASE__ )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
SCREAMING_SNAKE_CASE__ : str = target_dict.pad_index
SCREAMING_SNAKE_CASE__ : str = target_dict.bos_index
SCREAMING_SNAKE_CASE__ : int = target_dict.eos_index
SCREAMING_SNAKE_CASE__ : List[str] = len(target_dict.symbols )
SCREAMING_SNAKE_CASE__ : int = os.path.join(SCREAMING_SNAKE_CASE__ , "vocab.json" )
if not os.path.isdir(SCREAMING_SNAKE_CASE__ ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(SCREAMING_SNAKE_CASE__ ) )
return
os.makedirs(SCREAMING_SNAKE_CASE__ , exist_ok=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = target_dict.indices
# fairseq has the <pad> and <s> switched
SCREAMING_SNAKE_CASE__ : Dict = 42
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 43
with open(SCREAMING_SNAKE_CASE__ , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = WavaVecaPhonemeCTCTokenizer(
SCREAMING_SNAKE_CASE__ , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = True if config.feat_extract_norm == "layer" else False
SCREAMING_SNAKE_CASE__ : int = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=SCREAMING_SNAKE_CASE__ , return_attention_mask=SCREAMING_SNAKE_CASE__ , )
SCREAMING_SNAKE_CASE__ : str = WavaVecaProcessor(feature_extractor=SCREAMING_SNAKE_CASE__ , tokenizer=SCREAMING_SNAKE_CASE__ )
processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = UniSpeechForCTC(SCREAMING_SNAKE_CASE__ )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = UniSpeechForPreTraining(SCREAMING_SNAKE_CASE__ )
if is_finetuned:
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Tuple = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] ), "w2v_path": checkpoint_path} )
else:
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
SCREAMING_SNAKE_CASE__ : int = model[0].eval()
recursively_load_weights(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
hf_unispeech.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
_lowerCamelCase : Optional[int] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_lowerCamelCase : Optional[int] = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 157 | 1 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase__ = {
'''vocab_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/vocab.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/vocab.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/vocab.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/vocab.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/vocab.json''',
},
'''merges_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/merges.txt''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/merges.txt''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/merges.txt''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/merges.txt''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''gpt2''': '''https://huggingface.co/gpt2/resolve/main/tokenizer.json''',
'''gpt2-medium''': '''https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json''',
'''gpt2-large''': '''https://huggingface.co/gpt2-large/resolve/main/tokenizer.json''',
'''gpt2-xl''': '''https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json''',
'''distilgpt2''': '''https://huggingface.co/distilgpt2/resolve/main/tokenizer.json''',
},
}
UpperCAmelCase__ = {
'''gpt2''': 1024,
'''gpt2-medium''': 1024,
'''gpt2-large''': 1024,
'''gpt2-xl''': 1024,
'''distilgpt2''': 1024,
}
class lowerCAmelCase__ ( lowerCamelCase_ ):
__a = VOCAB_FILES_NAMES
__a = PRETRAINED_VOCAB_FILES_MAP
__a = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__a = ['''input_ids''', '''attention_mask''']
__a = GPTaTokenizer
def __init__( self : Union[str, Any] , _lowerCamelCase : Dict=None , _lowerCamelCase : Any=None , _lowerCamelCase : Optional[Any]=None , _lowerCamelCase : List[Any]="<|endoftext|>" , _lowerCamelCase : Tuple="<|endoftext|>" , _lowerCamelCase : Union[str, Any]="<|endoftext|>" , _lowerCamelCase : Optional[int]=False , **_lowerCamelCase : Union[str, Any] , ):
super().__init__(
__snake_case , __snake_case , tokenizer_file=__snake_case , unk_token=__snake_case , bos_token=__snake_case , eos_token=__snake_case , add_prefix_space=__snake_case , **__snake_case , )
_snake_case = kwargs.pop('''add_bos_token''' , __snake_case )
_snake_case = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('''add_prefix_space''' , __snake_case ) != add_prefix_space:
_snake_case = getattr(__snake_case , pre_tok_state.pop('''type''' ) )
_snake_case = add_prefix_space
_snake_case = pre_tok_class(**__snake_case )
_snake_case = add_prefix_space
def lowercase ( self : Optional[Any] , *_lowerCamelCase : List[str] , **_lowerCamelCase : Tuple ):
_snake_case = kwargs.get('''is_split_into_words''' , __snake_case )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*__snake_case , **__snake_case )
def lowercase ( self : str , *_lowerCamelCase : Dict , **_lowerCamelCase : str ):
_snake_case = kwargs.get('''is_split_into_words''' , __snake_case )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*__snake_case , **__snake_case )
def lowercase ( self : int , _lowerCamelCase : str , _lowerCamelCase : Optional[str] = None ):
_snake_case = self._tokenizer.model.save(__snake_case , name=__snake_case )
return tuple(__snake_case )
def lowercase ( self : int , _lowerCamelCase : "Conversation" ):
_snake_case = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__snake_case , add_special_tokens=__snake_case ) + [self.eos_token_id] )
if len(__snake_case ) > self.model_max_length:
_snake_case = input_ids[-self.model_max_length :]
return input_ids
| 224 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def __lowercase ( __lowerCAmelCase : List[Any] , __lowerCAmelCase : str , __lowerCAmelCase : str , __lowerCAmelCase : Path , __lowerCAmelCase : str = None , __lowerCAmelCase : str = None , __lowerCAmelCase : str = None , ):
if config_name_or_path is None:
a__ = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
if generator_tokenizer_name_or_path is None:
a__ = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
a__ = question_encoder_name_or_path
a__ = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
# Save model.
a__ = RagConfig.from_pretrained(__lowerCAmelCase )
a__ = AutoConfig.from_pretrained(__lowerCAmelCase )
a__ = AutoConfig.from_pretrained(__lowerCAmelCase )
a__ = gen_config
a__ = question_encoder_config
a__ = model_class.from_pretrained_question_encoder_generator(
__lowerCAmelCase , __lowerCAmelCase , config=__lowerCAmelCase )
rag_model.save_pretrained(__lowerCAmelCase )
# Sanity check.
model_class.from_pretrained(__lowerCAmelCase )
# Save tokenizers.
a__ = AutoTokenizer.from_pretrained(__lowerCAmelCase )
gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' )
a__ = AutoTokenizer.from_pretrained(__lowerCAmelCase )
question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' )
if __name__ == "__main__":
snake_case : List[Any] = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
snake_case : Union[str, Any] = parser.parse_args()
snake_case : Dict = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
| 335 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
__A ={
"""configuration_trocr""": ["""TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TrOCRConfig"""],
"""processing_trocr""": ["""TrOCRProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__A =[
"""TROCR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TrOCRForCausalLM""",
"""TrOCRPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
__A =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 716 |
'''simple docstring'''
import os
def _UpperCamelCase ( UpperCamelCase__ = "input.txt" ):
with open(os.path.join(os.path.dirname(UpperCamelCase__ ) , UpperCamelCase__ ) ) as input_file:
UpperCAmelCase__ : Tuple = [
[int(UpperCamelCase__ ) for element in line.split(""",""" )]
for line in input_file.readlines()
]
UpperCAmelCase__ : Optional[Any] = len(UpperCamelCase__ )
UpperCAmelCase__ : Any = len(matrix[0] )
UpperCAmelCase__ : Optional[int] = [[-1 for _ in range(UpperCamelCase__ )] for _ in range(UpperCamelCase__ )]
for i in range(UpperCamelCase__ ):
UpperCAmelCase__ : Any = matrix[i][0]
for j in range(1 , UpperCamelCase__ ):
for i in range(UpperCamelCase__ ):
UpperCAmelCase__ : Any = minimal_path_sums[i][j - 1] + matrix[i][j]
for i in range(1 , UpperCamelCase__ ):
UpperCAmelCase__ : Dict = min(
minimal_path_sums[i][j] , minimal_path_sums[i - 1][j] + matrix[i][j] )
for i in range(rows - 2 , -1 , -1 ):
UpperCAmelCase__ : int = min(
minimal_path_sums[i][j] , minimal_path_sums[i + 1][j] + matrix[i][j] )
return min(minimal_path_sums_row[-1] for minimal_path_sums_row in minimal_path_sums )
if __name__ == "__main__":
print(f"""{solution() = }""") | 113 | 0 |
from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class __snake_case ( UpperCamelCase_ ):
def __init__( self : Optional[int] , A_ : Distribution , A_ : int=None , A_ : Optional[int]=None , A_ : Tuple=0):
lowerCAmelCase_ : List[str] = 1.0 if scale is None else scale
lowerCAmelCase_ : Tuple = 0.0 if loc is None else loc
super().__init__(A_ , [AffineTransform(loc=self.loc , scale=self.scale , event_dim=A_)])
@property
def UpperCAmelCase__ ( self : int):
return self.base_dist.mean * self.scale + self.loc
@property
def UpperCAmelCase__ ( self : Dict):
return self.base_dist.variance * self.scale**2
@property
def UpperCAmelCase__ ( self : str):
return self.variance.sqrt()
class __snake_case ( nn.Module ):
def __init__( self : Any , A_ : int , A_ : Dict[str, int] , A_ : Callable[..., Tuple[torch.Tensor]] , **A_ : List[Any]):
super().__init__(**A_)
lowerCAmelCase_ : Union[str, Any] = args_dim
lowerCAmelCase_ : Any = nn.ModuleList([nn.Linear(A_ , A_) for dim in args_dim.values()])
lowerCAmelCase_ : Optional[Any] = domain_map
def UpperCAmelCase__ ( self : List[Any] , A_ : torch.Tensor):
lowerCAmelCase_ : List[Any] = [proj(A_) for proj in self.proj]
return self.domain_map(*A_)
class __snake_case ( nn.Module ):
def __init__( self : Optional[Any] , A_ : List[Any]):
super().__init__()
lowerCAmelCase_ : List[str] = function
def UpperCAmelCase__ ( self : List[Any] , A_ : Dict , *A_ : Optional[Any]):
return self.function(A_ , *A_)
class __snake_case :
_a = 42
_a = 42
_a = 42
def __init__( self : str , A_ : int = 1):
lowerCAmelCase_ : Union[str, Any] = dim
lowerCAmelCase_ : Tuple = {k: dim * self.args_dim[k] for k in self.args_dim}
def UpperCAmelCase__ ( self : Tuple , A_ : str):
if self.dim == 1:
return self.distribution_class(*A_)
else:
return Independent(self.distribution_class(*A_) , 1)
def UpperCAmelCase__ ( self : int , A_ : Dict , A_ : Optional[torch.Tensor] = None , A_ : Optional[torch.Tensor] = None , ):
lowerCAmelCase_ : Union[str, Any] = self._base_distribution(A_)
if loc is None and scale is None:
return distr
else:
return AffineTransformed(A_ , loc=A_ , scale=A_ , event_dim=self.event_dim)
@property
def UpperCAmelCase__ ( self : str):
return () if self.dim == 1 else (self.dim,)
@property
def UpperCAmelCase__ ( self : Optional[Any]):
return len(self.event_shape)
@property
def UpperCAmelCase__ ( self : List[str]):
return 0.0
def UpperCAmelCase__ ( self : Union[str, Any] , A_ : int):
return ParameterProjection(
in_features=A_ , args_dim=self.args_dim , domain_map=LambdaLayer(self.domain_map) , )
def UpperCAmelCase__ ( self : List[Any] , *A_ : torch.Tensor):
raise NotImplementedError()
@staticmethod
def UpperCAmelCase__ ( A_ : torch.Tensor):
return (x + torch.sqrt(torch.square(A_) + 4.0)) / 2.0
class __snake_case ( UpperCamelCase_ ):
_a = {"df": 1, "loc": 1, "scale": 1}
_a = StudentT
@classmethod
def UpperCAmelCase__ ( cls : List[str] , A_ : torch.Tensor , A_ : torch.Tensor , A_ : torch.Tensor):
lowerCAmelCase_ : List[Any] = cls.squareplus(A_).clamp_min(torch.finfo(scale.dtype).eps)
lowerCAmelCase_ : str = 2.0 + cls.squareplus(A_)
return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class __snake_case ( UpperCamelCase_ ):
_a = {"loc": 1, "scale": 1}
_a = Normal
@classmethod
def UpperCAmelCase__ ( cls : Optional[Any] , A_ : torch.Tensor , A_ : torch.Tensor):
lowerCAmelCase_ : Any = cls.squareplus(A_).clamp_min(torch.finfo(scale.dtype).eps)
return loc.squeeze(-1), scale.squeeze(-1)
class __snake_case ( UpperCamelCase_ ):
_a = {"total_count": 1, "logits": 1}
_a = NegativeBinomial
@classmethod
def UpperCAmelCase__ ( cls : Dict , A_ : torch.Tensor , A_ : torch.Tensor):
lowerCAmelCase_ : Dict = cls.squareplus(A_)
return total_count.squeeze(-1), logits.squeeze(-1)
def UpperCAmelCase__ ( self : Dict , A_ : Optional[Any]):
lowerCAmelCase_ , lowerCAmelCase_ : Optional[int] = distr_args
if self.dim == 1:
return self.distribution_class(total_count=A_ , logits=A_)
else:
return Independent(self.distribution_class(total_count=A_ , logits=A_) , 1)
def UpperCAmelCase__ ( self : str , A_ : int , A_ : Optional[torch.Tensor] = None , A_ : Optional[torch.Tensor] = None):
lowerCAmelCase_ , lowerCAmelCase_ : List[Any] = distr_args
if scale is not None:
# See scaling property of Gamma.
logits += scale.log()
return self._base_distribution((total_count, logits))
| 171 |
import random
from typing import Any
def UpperCamelCase( __UpperCamelCase : list ):
for _ in range(len(__UpperCamelCase ) ):
lowerCAmelCase_ : Union[str, Any] = random.randint(0 ,len(__UpperCamelCase ) - 1 )
lowerCAmelCase_ : List[Any] = random.randint(0 ,len(__UpperCamelCase ) - 1 )
lowerCAmelCase_ , lowerCAmelCase_ : Dict = data[b], data[a]
return data
if __name__ == "__main__":
A__ : List[Any] = [0, 1, 2, 3, 4, 5, 6, 7]
A__ : int = ['''python''', '''says''', '''hello''', '''!''']
print('''Fisher-Yates Shuffle:''')
print('''List''', integers, strings)
print('''FY Shuffle''', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings))
| 171 | 1 |
'''simple docstring'''
def lowerCamelCase__ ( __lowerCamelCase : Dict , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Optional[Any] , __lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
global f # a global dp table for knapsack
if f[i][j] < 0:
if j < wt[i - 1]:
_UpperCAmelCase : Optional[int] =mf_knapsack(i - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase : List[str] =max(
mf_knapsack(i - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) , mf_knapsack(i - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , j - wt[i - 1] ) + val[i - 1] , )
_UpperCAmelCase : Union[str, Any] =val
return f[i][j]
def lowerCamelCase__ ( __lowerCamelCase : Dict , __lowerCamelCase : str , __lowerCamelCase : List[str] , __lowerCamelCase : int ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =[[0] * (w + 1) for _ in range(n + 1 )]
for i in range(1 , n + 1 ):
for w_ in range(1 , w + 1 ):
if wt[i - 1] <= w_:
_UpperCAmelCase : str =max(val[i - 1] + dp[i - 1][w_ - wt[i - 1]] , dp[i - 1][w_] )
else:
_UpperCAmelCase : Optional[Any] =dp[i - 1][w_]
return dp[n][w_], dp
def lowerCamelCase__ ( __lowerCamelCase : int , __lowerCamelCase : list , __lowerCamelCase : list ):
'''simple docstring'''
if not (isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(_SCREAMING_SNAKE_CASE , (list, tuple) )):
raise ValueError(
'Both the weights and values vectors must be either lists or tuples' )
_UpperCAmelCase : List[str] =len(_SCREAMING_SNAKE_CASE )
if num_items != len(_SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[int] =(
'The number of weights must be the same as the number of values.\n'
f"But got {num_items} weights and {len(_SCREAMING_SNAKE_CASE )} values"
)
raise ValueError(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE ):
if not isinstance(wt[i] , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : List[str] =(
'All weights must be integers but got weight of '
f"type {type(wt[i] )} at index {i}"
)
raise TypeError(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase : Tuple =knapsack(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase : Any =set()
_construct_solution(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
return optimal_val, example_optional_set
def lowerCamelCase__ ( __lowerCamelCase : list , __lowerCamelCase : list , __lowerCamelCase : int , __lowerCamelCase : int , __lowerCamelCase : set ):
'''simple docstring'''
if i > 0 and j > 0:
if dp[i - 1][j] == dp[i][j]:
_construct_solution(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , i - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
optimal_set.add(_SCREAMING_SNAKE_CASE )
_construct_solution(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , i - 1 , j - wt[i - 1] , _SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowercase =[3, 2, 4, 4]
lowercase =[4, 3, 2, 3]
lowercase =4
lowercase =6
lowercase =[[0] * (w + 1)] + [[0] + [-1] * (w + 1) for _ in range(n + 1)]
lowercase =knapsack(w, wt, val, n)
print(optimal_solution)
print(mf_knapsack(n, wt, val, w)) # switched the n and w
# testing the dynamic programming problem with example
# the optimal subset for the above example are items 3 and 4
lowercase =knapsack_with_example_solution(w, wt, val)
assert optimal_solution == 8
assert optimal_subset == {3, 4}
print('optimal_value = ', optimal_solution)
print('An optimal subset corresponding to the optimal value', optimal_subset)
| 707 |
'''simple docstring'''
import unittest
from datasets import load_dataset
from transformers import BloomTokenizerFast
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( lowerCAmelCase ,unittest.TestCase ):
UpperCAmelCase =None
UpperCAmelCase =BloomTokenizerFast
UpperCAmelCase =BloomTokenizerFast
UpperCAmelCase =True
UpperCAmelCase =False
UpperCAmelCase ="tokenizer_file"
UpperCAmelCase ={"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "pad_token": "<pad>"}
def lowerCAmelCase ( self) -> Any:
'''simple docstring'''
super().setUp()
_UpperCAmelCase : Union[str, Any] =BloomTokenizerFast.from_pretrained('bigscience/tokenizer')
tokenizer.save_pretrained(self.tmpdirname)
def lowerCAmelCase ( self , **snake_case) -> List[Any]:
'''simple docstring'''
kwargs.update(self.special_tokens_map)
return BloomTokenizerFast.from_pretrained(self.tmpdirname , **snake_case)
def lowerCAmelCase ( self) -> int:
'''simple docstring'''
_UpperCAmelCase : Optional[Any] =self.get_rust_tokenizer()
_UpperCAmelCase : Any =['The quick brown fox</s>', 'jumps over the lazy dog</s>']
_UpperCAmelCase : int =[[2_1_7_5, 2_3_7_1_4, 7_3_1_7_3, 1_4_4_2_5_2, 2], [7_7, 1_3_2_6_1_9, 3_4_7_8, 3_6_8, 1_0_9_5_8_6, 3_5_4_3_3, 2]]
_UpperCAmelCase : Tuple =tokenizer.batch_encode_plus(snake_case)['input_ids']
self.assertListEqual(snake_case , snake_case)
_UpperCAmelCase : Any =tokenizer.batch_decode(snake_case)
self.assertListEqual(snake_case , snake_case)
def lowerCAmelCase ( self , snake_case=6) -> str:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
_UpperCAmelCase : Optional[int] =self.rust_tokenizer_class.from_pretrained(snake_case , **snake_case)
# tokenizer_r.pad_token = None # Hotfixing padding = None
# Simple input
_UpperCAmelCase : Dict ='This is a simple input'
_UpperCAmelCase : str =['This is a simple input 1', 'This is a simple input 2']
_UpperCAmelCase : List[Any] =('This is a simple input', 'This is a pair')
_UpperCAmelCase : Union[str, Any] =[
('This is a simple input 1', 'This is a simple input 2'),
('This is a simple pair 1', 'This is a simple pair 2'),
]
# Simple input tests
try:
tokenizer_r.encode(snake_case , max_length=snake_case)
tokenizer_r.encode_plus(snake_case , max_length=snake_case)
tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case)
tokenizer_r.encode(snake_case , max_length=snake_case)
tokenizer_r.batch_encode_plus(snake_case , max_length=snake_case)
except ValueError:
self.fail('Bloom Tokenizer should be able to deal with padding')
_UpperCAmelCase : Tuple =None # Hotfixing padding = None
self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length')
# Simple input
self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length')
# Simple input
self.assertRaises(
snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
# Pair input
self.assertRaises(snake_case , tokenizer_r.encode , snake_case , max_length=snake_case , padding='max_length')
# Pair input
self.assertRaises(snake_case , tokenizer_r.encode_plus , snake_case , max_length=snake_case , padding='max_length')
# Pair input
self.assertRaises(
snake_case , tokenizer_r.batch_encode_plus , snake_case , max_length=snake_case , padding='max_length' , )
def lowerCAmelCase ( self) -> Dict:
'''simple docstring'''
_UpperCAmelCase : Dict =self.get_rust_tokenizer()
_UpperCAmelCase : List[Any] =load_dataset('xnli' , 'all_languages' , split='test' , streaming=snake_case)
_UpperCAmelCase : List[Any] =next(iter(snake_case))['premise'] # pick up one data
_UpperCAmelCase : Union[str, Any] =list(sample_data.values())
_UpperCAmelCase : Dict =list(map(tokenizer.encode , snake_case))
_UpperCAmelCase : Optional[Any] =[tokenizer.decode(snake_case , clean_up_tokenization_spaces=snake_case) for x in output_tokens]
self.assertListEqual(snake_case , snake_case)
def lowerCAmelCase ( self) -> Optional[Any]:
'''simple docstring'''
# The test has to be overriden because BLOOM uses ALiBi positional embeddings that does not have
# any sequence length constraints. This test of the parent class will fail since it relies on the
# maximum sequence length of the positoonal embeddings.
self.assertGreaterEqual(len(self.tokenizer_class.pretrained_vocab_files_map) , 1)
self.assertGreaterEqual(len(list(self.tokenizer_class.pretrained_vocab_files_map.values())[0]) , 1)
| 331 | 0 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
_SCREAMING_SNAKE_CASE = "examples/"
_SCREAMING_SNAKE_CASE = {
"examples": (re.compile(r"^check_min_version\(\"[^\"]+\"\)\s*$", re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(r"^__version__\s+=\s+\"([^\"]+)\"\s*$", re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(r"^(\s*)version\s*=\s*\"[^\"]+\",", re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(r"^(\s*)release\s*=\s*\"[^\"]+\"$", re.MULTILINE), "release = \"VERSION\"\n"),
}
_SCREAMING_SNAKE_CASE = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
_SCREAMING_SNAKE_CASE = "README.md"
def __lowerCamelCase ( __lowerCAmelCase : Tuple , __lowerCAmelCase : Any , __lowerCAmelCase : Dict ) -> Dict:
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case = f.read()
snake_case , snake_case = REPLACE_PATTERNS[pattern]
snake_case = replace.replace("""VERSION""" , __lowerCAmelCase )
snake_case = re_pattern.sub(__lowerCAmelCase , __lowerCAmelCase )
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : List[Any] ) -> int:
for folder, directories, fnames in os.walk(__lowerCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(__lowerCAmelCase , __lowerCAmelCase ) , __lowerCAmelCase , pattern="""examples""" )
def __lowerCamelCase ( __lowerCAmelCase : int , __lowerCAmelCase : Union[str, Any]=False ) -> List[Any]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if not patch:
update_version_in_examples(__lowerCAmelCase )
def __lowerCamelCase ( ) -> Optional[Any]:
snake_case = """🤗 Transformers currently provides the following architectures"""
snake_case = """1. Want to contribute a new model?"""
with open(__lowerCAmelCase , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
snake_case = f.readlines()
# Find the start of the list.
snake_case = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
snake_case = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
snake_case = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(__lowerCAmelCase , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(__lowerCAmelCase )
def __lowerCamelCase ( ) -> List[Any]:
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
snake_case = f.read()
snake_case = REPLACE_PATTERNS["""init"""][0].search(__lowerCAmelCase ).groups()[0]
return packaging.version.parse(__lowerCAmelCase )
def __lowerCamelCase ( __lowerCAmelCase : Union[str, Any]=False ) -> Union[str, Any]:
snake_case = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
snake_case = default_version.base_version
elif patch:
snake_case = F'''{default_version.major}.{default_version.minor}.{default_version.micro + 1}'''
else:
snake_case = F'''{default_version.major}.{default_version.minor + 1}.0'''
# Now let's ask nicely if that's the right one.
snake_case = input(F'''Which version are you releasing? [{default_version}]''' )
if len(__lowerCAmelCase ) == 0:
snake_case = default_version
print(F'''Updating version to {version}.''' )
global_version_update(__lowerCAmelCase , patch=__lowerCAmelCase )
def __lowerCamelCase ( ) -> List[str]:
snake_case = get_version()
snake_case = F'''{current_version.major}.{current_version.minor + 1}.0.dev0'''
snake_case = current_version.base_version
# Check with the user we got that right.
snake_case = input(F'''Which version are we developing now? [{dev_version}]''' )
if len(__lowerCAmelCase ) == 0:
snake_case = dev_version
print(F'''Updating version to {version}.''' )
global_version_update(__lowerCAmelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
_SCREAMING_SNAKE_CASE = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print("Nothing to do after a patch :-)")
else:
post_release_work()
| 369 |
'''simple docstring'''
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin, SchedulerOutput
@dataclass
class _lowerCAmelCase ( A__ ):
"""simple docstring"""
snake_case_ = 42
snake_case_ = 42
class _lowerCAmelCase ( A__ , A__ ):
"""simple docstring"""
snake_case_ = 1
@register_to_config
def __init__( self : str , __snake_case : int = 20_00 , __snake_case : float = 0.15 , __snake_case : float = 0.01 , __snake_case : float = 13_48.0 , __snake_case : float = 1e-5 , __snake_case : int = 1 , )-> str:
# standard deviation of the initial noise distribution
snake_case = sigma_max
# setable values
snake_case = None
self.set_sigmas(__snake_case , __snake_case , __snake_case , __snake_case )
def lowerCAmelCase ( self : List[Any] , __snake_case : torch.FloatTensor , __snake_case : Optional[int] = None )-> torch.FloatTensor:
return sample
def lowerCAmelCase ( self : List[str] , __snake_case : int , __snake_case : float = None , __snake_case : Union[str, torch.device] = None )-> Optional[Any]:
snake_case = sampling_eps if sampling_eps is not None else self.config.sampling_eps
snake_case = torch.linspace(1 , __snake_case , __snake_case , device=__snake_case )
def lowerCAmelCase ( self : Union[str, Any] , __snake_case : int , __snake_case : float = None , __snake_case : float = None , __snake_case : float = None )-> str:
snake_case = sigma_min if sigma_min is not None else self.config.sigma_min
snake_case = sigma_max if sigma_max is not None else self.config.sigma_max
snake_case = sampling_eps if sampling_eps is not None else self.config.sampling_eps
if self.timesteps is None:
self.set_timesteps(__snake_case , __snake_case )
snake_case = sigma_min * (sigma_max / sigma_min) ** (self.timesteps / sampling_eps)
snake_case = torch.exp(torch.linspace(math.log(__snake_case ) , math.log(__snake_case ) , __snake_case ) )
snake_case = torch.tensor([sigma_min * (sigma_max / sigma_min) ** t for t in self.timesteps] )
def lowerCAmelCase ( self : str , __snake_case : List[str] , __snake_case : str )-> Optional[int]:
return torch.where(
timesteps == 0 , torch.zeros_like(t.to(timesteps.device ) ) , self.discrete_sigmas[timesteps - 1].to(timesteps.device ) , )
def lowerCAmelCase ( self : int , __snake_case : torch.FloatTensor , __snake_case : int , __snake_case : torch.FloatTensor , __snake_case : Optional[torch.Generator] = None , __snake_case : bool = True , )-> Union[SdeVeOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
snake_case = timestep * torch.ones(
sample.shape[0] , device=sample.device ) # torch.repeat_interleave(timestep, sample.shape[0])
snake_case = (timestep * (len(self.timesteps ) - 1)).long()
# mps requires indices to be in the same device, so we use cpu as is the default with cuda
snake_case = timesteps.to(self.discrete_sigmas.device )
snake_case = self.discrete_sigmas[timesteps].to(sample.device )
snake_case = self.get_adjacent_sigma(__snake_case , __snake_case ).to(sample.device )
snake_case = torch.zeros_like(__snake_case )
snake_case = (sigma**2 - adjacent_sigma**2) ** 0.5
# equation 6 in the paper: the model_output modeled by the network is grad_x log pt(x)
# also equation 47 shows the analog from SDE models to ancestral sampling methods
snake_case = diffusion.flatten()
while len(diffusion.shape ) < len(sample.shape ):
snake_case = diffusion.unsqueeze(-1 )
snake_case = drift - diffusion**2 * model_output
# equation 6: sample noise for the diffusion term of
snake_case = randn_tensor(
sample.shape , layout=sample.layout , generator=__snake_case , device=sample.device , dtype=sample.dtype )
snake_case = sample - drift # subtract because `dt` is a small negative timestep
# TODO is the variable diffusion the correct scaling term for the noise?
snake_case = prev_sample_mean + diffusion * noise # add impact of diffusion field g
if not return_dict:
return (prev_sample, prev_sample_mean)
return SdeVeOutput(prev_sample=__snake_case , prev_sample_mean=__snake_case )
def lowerCAmelCase ( self : Optional[Any] , __snake_case : torch.FloatTensor , __snake_case : torch.FloatTensor , __snake_case : Optional[torch.Generator] = None , __snake_case : bool = True , )-> Union[SchedulerOutput, Tuple]:
if self.timesteps is None:
raise ValueError(
"""`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler""" )
# For small batch sizes, the paper "suggest replacing norm(z) with sqrt(d), where d is the dim. of z"
# sample noise for correction
snake_case = randn_tensor(sample.shape , layout=sample.layout , generator=__snake_case ).to(sample.device )
# compute step size from the model_output, the noise, and the snr
snake_case = torch.norm(model_output.reshape(model_output.shape[0] , -1 ) , dim=-1 ).mean()
snake_case = torch.norm(noise.reshape(noise.shape[0] , -1 ) , dim=-1 ).mean()
snake_case = (self.config.snr * noise_norm / grad_norm) ** 2 * 2
snake_case = step_size * torch.ones(sample.shape[0] ).to(sample.device )
# self.repeat_scalar(step_size, sample.shape[0])
# compute corrected sample: model_output term and noise term
snake_case = step_size.flatten()
while len(step_size.shape ) < len(sample.shape ):
snake_case = step_size.unsqueeze(-1 )
snake_case = sample + step_size * model_output
snake_case = prev_sample_mean + ((step_size * 2) ** 0.5) * noise
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=__snake_case )
def lowerCAmelCase ( self : str , __snake_case : torch.FloatTensor , __snake_case : torch.FloatTensor , __snake_case : torch.FloatTensor , )-> torch.FloatTensor:
# Make sure sigmas and timesteps have the same device and dtype as original_samples
snake_case = timesteps.to(original_samples.device )
snake_case = self.discrete_sigmas.to(original_samples.device )[timesteps]
snake_case = (
noise * sigmas[:, None, None, None]
if noise is not None
else torch.randn_like(__snake_case ) * sigmas[:, None, None, None]
)
snake_case = noise + original_samples
return noisy_samples
def __len__( self : str )-> Any:
return self.config.num_train_timesteps
| 369 | 1 |
from __future__ import annotations
from collections import namedtuple
def __UpperCamelCase ( _A : float , _A : float , _A : float ) ->tuple:
"""simple docstring"""
lowerCamelCase_ =namedtuple("""result""" , """name value""" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("""Only one argument must be 0""" )
elif power < 0:
raise ValueError(
"""Power cannot be negative in any electrical/electronics system""" )
elif voltage == 0:
return result("""voltage""" , power / current )
elif current == 0:
return result("""current""" , power / voltage )
elif power == 0:
return result("""power""" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("""Exactly one argument must be 0""" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 75 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
__A : int = '%20'.join(argv[1:]) if len(argv) > 1 else quote(str(input('Search: ')))
print('Googling.....')
__A : str = F"""https://www.google.com/search?q={query}&num=100"""
__A : int = requests.get(
url,
headers={'User-Agent': str(UserAgent().random)},
)
try:
__A : str = (
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'yuRUbf'})
.find('a')
.get('href')
)
except AttributeError:
__A : Any = parse_qs(
BeautifulSoup(res.text, 'html.parser')
.find('div', attrs={'class': 'kCrYT'})
.find('a')
.get('href')
)['url'][0]
webbrowser.open(link)
| 75 | 1 |
from collections import deque
from .hash_table import HashTable
class _SCREAMING_SNAKE_CASE ( __snake_case ):
'''simple docstring'''
def __init__( self : Optional[int] , *__lowerCamelCase : Union[str, Any] , **__lowerCamelCase : Dict ):
super().__init__(*__lowerCamelCase , **__lowerCamelCase )
def _snake_case ( self : str , __lowerCamelCase : Dict , __lowerCamelCase : List[str] ):
SCREAMING_SNAKE_CASE = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(__lowerCamelCase )
SCREAMING_SNAKE_CASE = self.values[key]
def _snake_case ( self : int ):
return (
sum(self.charge_factor - len(__lowerCamelCase ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def _snake_case ( self : List[str] , __lowerCamelCase : List[Any] , __lowerCamelCase : Optional[int]=None ):
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(__lowerCamelCase ) == 0
):
return key
return super()._collision_resolution(__lowerCamelCase , __lowerCamelCase ) | 16 |
"""simple docstring"""
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
_UpperCAmelCase = 1_0
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
for i in range(lowercase , lowercase ):
if array[i] == target:
return i
return -1
def __magic_name__ ( lowercase , lowercase ):
SCREAMING_SNAKE_CASE_: Optional[Any] =0
SCREAMING_SNAKE_CASE_: List[Any] =len(lowercase )
while left <= right:
if right - left < precision:
return lin_search(lowercase , lowercase , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Optional[Any] =(left + right) // 3 + 1
SCREAMING_SNAKE_CASE_: str =2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
SCREAMING_SNAKE_CASE_: Union[str, Any] =one_third - 1
elif array[two_third] < target:
SCREAMING_SNAKE_CASE_: Optional[int] =two_third + 1
else:
SCREAMING_SNAKE_CASE_: List[str] =one_third + 1
SCREAMING_SNAKE_CASE_: Optional[Any] =two_third - 1
else:
return -1
def __magic_name__ ( lowercase , lowercase , lowercase , lowercase ):
if left < right:
if right - left < precision:
return lin_search(lowercase , lowercase , lowercase , lowercase )
SCREAMING_SNAKE_CASE_: Union[str, Any] =(left + right) // 3 + 1
SCREAMING_SNAKE_CASE_: List[str] =2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(lowercase , one_third - 1 , lowercase , lowercase )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , lowercase , lowercase , lowercase )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , lowercase , lowercase )
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod()
_UpperCAmelCase = input("""Enter numbers separated by comma:\n""").strip()
_UpperCAmelCase = [int(item.strip()) for item in user_input.split(""",""")]
assert collection == sorted(collection), f"List must be ordered.\n{collection}."
_UpperCAmelCase = int(input("""Enter the number to be found in the list:\n""").strip())
_UpperCAmelCase = ite_ternary_search(collection, target)
_UpperCAmelCase = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(f"""Iterative search: {target} found at positions: {resulta}""")
print(f"""Recursive search: {target} found at positions: {resulta}""")
else:
print("""Not found""")
| 409 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__UpperCamelCase : Optional[Any] = {
'''configuration_efficientformer''': [
'''EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''EfficientFormerConfig''',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = ['''EfficientFormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : Any = [
'''EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''EfficientFormerForImageClassification''',
'''EfficientFormerForImageClassificationWithTeacher''',
'''EfficientFormerModel''',
'''EfficientFormerPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCamelCase : List[str] = [
'''TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFEfficientFormerForImageClassification''',
'''TFEfficientFormerForImageClassificationWithTeacher''',
'''TFEfficientFormerModel''',
'''TFEfficientFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
__UpperCamelCase : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 417 |
'''simple docstring'''
def lowercase ( lowerCAmelCase : list[list[int]] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : list[int]):
"""simple docstring"""
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path)
def lowercase ( lowerCAmelCase : list[list[int]] , lowerCAmelCase : list[int] , lowerCAmelCase : int):
"""simple docstring"""
if curr_ind == len(lowerCAmelCase):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(lowerCAmelCase)):
if valid_connection(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase):
# Insert current vertex into path as next transition
_A : Tuple = next_ver
# Validate created path
if util_hamilton_cycle(lowerCAmelCase , lowerCAmelCase , curr_ind + 1):
return True
# Backtrack
_A : Optional[int] = -1
return False
def lowercase ( lowerCAmelCase : list[list[int]] , lowerCAmelCase : int = 0):
"""simple docstring"""
_A : Optional[Any] = [-1] * (len(lowerCAmelCase) + 1)
# initialize start and end of path with starting index
_A : int = start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(lowerCAmelCase , lowerCAmelCase , 1) else []
| 417 | 1 |
import os
from typing import Dict, List, Union
import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs
from .tokenization_gpta import GPTaTokenizer
class A__ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self : Dict , lowerCamelCase__ : Dict[str, int] , lowerCamelCase__ : List[str] , lowerCamelCase__ : int = None , lowerCamelCase__ : int = None ):
super().__init__()
a__ : Optional[Any] = pad_token_id
a__ : str = max_length
a__ : str = vocab
a__ : List[Any] = merges
a__ : List[Any] = BytePairTokenizer(lowerCamelCase__ , lowerCamelCase__ , sequence_length=lowerCamelCase__ )
@classmethod
def _UpperCamelCase( cls : Optional[int] , lowerCamelCase__ : GPTaTokenizer , *lowerCamelCase__ : Union[str, Any] , **lowerCamelCase__ : List[Any] ):
a__ : Optional[Any] = [" ".join(lowerCamelCase__ ) for m in tokenizer.bpe_ranks.keys()]
a__ : Optional[Any] = tokenizer.get_vocab()
return cls(lowerCamelCase__ , lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
@classmethod
def _UpperCamelCase( cls : Optional[Any] , lowerCamelCase__ : Union[str, os.PathLike] , *lowerCamelCase__ : Any , **lowerCamelCase__ : List[Any] ):
a__ : int = GPTaTokenizer.from_pretrained(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
return cls.from_tokenizer(lowerCamelCase__ , *lowerCamelCase__ , **lowerCamelCase__ )
@classmethod
def _UpperCamelCase( cls : Optional[int] , lowerCamelCase__ : int ):
return cls(**lowerCamelCase__ )
def _UpperCamelCase( self : List[str] ):
return {
"vocab": self.vocab,
"merges": self.merges,
"max_length": self.max_length,
"pad_token_id": self.pad_token_id,
}
def _UpperCamelCase( self : Union[str, Any] , lowerCamelCase__ : int , lowerCamelCase__ : int = None ):
a__ : Any = self.tf_tokenizer(lowerCamelCase__ )
a__ : Tuple = tf.ones_like(lowerCamelCase__ )
if self.pad_token_id is not None:
# pad the tokens up to max length
a__ : str = max_length if max_length is not None else self.max_length
if max_length is not None:
a__, a__ : Any = pad_model_inputs(
lowerCamelCase__ , max_seq_length=lowerCamelCase__ , pad_value=self.pad_token_id )
return {"attention_mask": attention_mask, "input_ids": input_ids}
| 37 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=lowercase__ ):
snake_case__ : List[str] = ['''onnx''']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : Dict ) -> Any:
requires_backends(self , ['onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
requires_backends(cls , ['onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str , *SCREAMING_SNAKE_CASE__ : Dict , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Union[str, Any]:
requires_backends(cls , ['onnx'] )
| 570 | 0 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def A__ ( ):
'''simple docstring'''
UpperCamelCase : int = ArgumentParser("Diffusers CLI tool" , usage="diffusers-cli <command> [<args>]")
UpperCamelCase : str = parser.add_subparsers(help="diffusers-cli command helpers")
# Register commands
EnvironmentCommand.register_subcommand(A)
# Let's go
UpperCamelCase : Tuple = parser.parse_args()
if not hasattr(A , "func"):
parser.print_help()
exit(1)
# Run
UpperCamelCase : Optional[int] = args.func(A)
service.run()
if __name__ == "__main__":
main()
| 435 |
'''simple docstring'''
from .glue import glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels
from .squad import SquadExample, SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
from .utils import DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor
from .xnli import xnli_output_modes, xnli_processors, xnli_tasks_num_labels
| 435 | 1 |
lowerCamelCase__ = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
lowerCamelCase__ = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
lowerCamelCase__ = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 455 |
'''simple docstring'''
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
__UpperCAmelCase = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class a__ ( a__ ):
'''simple docstring'''
lowercase__ : bool = field(default=a__ , metadata={"help": "Whether to use SortishSampler or not."} )
lowercase__ : bool = field(
default=a__ , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `max_length` value of the model configuration."
)
} , )
lowercase__ : Optional[int] = field(
default=a__ , metadata={
"help": (
"The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
"to the `num_beams` value of the model configuration."
)
} , )
lowercase__ : Optional[Union[str, Path, GenerationConfig]] = field(
default=a__ , metadata={
"help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
} , )
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
lowerCAmelCase__ = super().to_dict()
for k, v in d.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
lowerCAmelCase__ = v.to_dict()
return d | 90 | 0 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __magic_name__ :
def __init__( self : Optional[int] ,__SCREAMING_SNAKE_CASE : Dict ,__SCREAMING_SNAKE_CASE : Optional[int]=1_3 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=3_2 ,__SCREAMING_SNAKE_CASE : str=2 ,__SCREAMING_SNAKE_CASE : str=3 ,__SCREAMING_SNAKE_CASE : List[str]=1_6 ,__SCREAMING_SNAKE_CASE : Tuple=[1, 2, 1] ,__SCREAMING_SNAKE_CASE : List[Any]=[2, 2, 4] ,__SCREAMING_SNAKE_CASE : Tuple=2 ,__SCREAMING_SNAKE_CASE : int=2.0 ,__SCREAMING_SNAKE_CASE : Optional[int]=True ,__SCREAMING_SNAKE_CASE : Any=0.0 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=0.0 ,__SCREAMING_SNAKE_CASE : Tuple=0.1 ,__SCREAMING_SNAKE_CASE : List[str]="gelu" ,__SCREAMING_SNAKE_CASE : Dict=False ,__SCREAMING_SNAKE_CASE : str=True ,__SCREAMING_SNAKE_CASE : Any=0.02 ,__SCREAMING_SNAKE_CASE : Tuple=1e-5 ,__SCREAMING_SNAKE_CASE : str=True ,__SCREAMING_SNAKE_CASE : Any=None ,__SCREAMING_SNAKE_CASE : Optional[int]=True ,__SCREAMING_SNAKE_CASE : List[Any]=1_0 ,__SCREAMING_SNAKE_CASE : Union[str, Any]=8 ,__SCREAMING_SNAKE_CASE : str=["stage1", "stage2", "stage3"] ,__SCREAMING_SNAKE_CASE : int=[1, 2, 3] ,):
UpperCAmelCase = parent
UpperCAmelCase = batch_size
UpperCAmelCase = image_size
UpperCAmelCase = patch_size
UpperCAmelCase = num_channels
UpperCAmelCase = embed_dim
UpperCAmelCase = depths
UpperCAmelCase = num_heads
UpperCAmelCase = window_size
UpperCAmelCase = mlp_ratio
UpperCAmelCase = qkv_bias
UpperCAmelCase = hidden_dropout_prob
UpperCAmelCase = attention_probs_dropout_prob
UpperCAmelCase = drop_path_rate
UpperCAmelCase = hidden_act
UpperCAmelCase = use_absolute_embeddings
UpperCAmelCase = patch_norm
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = initializer_range
UpperCAmelCase = is_training
UpperCAmelCase = scope
UpperCAmelCase = use_labels
UpperCAmelCase = type_sequence_label_size
UpperCAmelCase = encoder_stride
UpperCAmelCase = out_features
UpperCAmelCase = out_indices
def _UpperCAmelCase ( self : Any ):
UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase = None
if self.use_labels:
UpperCAmelCase = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _UpperCAmelCase ( self : str ):
return MaskFormerSwinConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def _UpperCAmelCase ( self : int ,__SCREAMING_SNAKE_CASE : List[str] ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : Tuple ):
UpperCAmelCase = MaskFormerSwinModel(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def _UpperCAmelCase ( self : Tuple ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Optional[Any] ,__SCREAMING_SNAKE_CASE : Tuple ):
UpperCAmelCase = MaskFormerSwinBackbone(config=__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase = model(__SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,[1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(__SCREAMING_SNAKE_CASE ):
UpperCAmelCase = ["stem"]
UpperCAmelCase = MaskFormerSwinBackbone(config=__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Any ):
UpperCAmelCase = self.prepare_config_and_inputs()
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = config_and_inputs
UpperCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __magic_name__ ( _snake_case , _snake_case , unittest.TestCase):
_UpperCAmelCase : Dict = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
_UpperCAmelCase : Optional[int] = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
_UpperCAmelCase : Dict = False
_UpperCAmelCase : str = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Optional[Any] = False
_UpperCAmelCase : Tuple = False
def _UpperCAmelCase ( self : Tuple ):
UpperCAmelCase = MaskFormerSwinModelTester(self )
UpperCAmelCase = ConfigTester(self ,config_class=__SCREAMING_SNAKE_CASE ,embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def _UpperCAmelCase ( self : Optional[Any] ):
pass
def _UpperCAmelCase ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _UpperCAmelCase ( self : List[str] ):
return
def _UpperCAmelCase ( self : int ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : Tuple ):
UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__SCREAMING_SNAKE_CASE )
@unittest.skip("Swin does not use inputs_embeds" )
def _UpperCAmelCase ( self : List[str] ):
pass
@unittest.skip("Swin does not support feedforward chunking" )
def _UpperCAmelCase ( self : List[str] ):
pass
def _UpperCAmelCase ( self : Optional[Any] ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE ,nn.Linear ) )
def _UpperCAmelCase ( self : str ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase = [*signature.parameters.keys()]
UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] ,__SCREAMING_SNAKE_CASE )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def _UpperCAmelCase ( self : str ):
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def _UpperCAmelCase ( self : Tuple ):
pass
def _UpperCAmelCase ( self : Dict ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : Optional[int] ,__SCREAMING_SNAKE_CASE : List[Any] ,__SCREAMING_SNAKE_CASE : Optional[Any] ):
UpperCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
UpperCAmelCase = model(**self._prepare_for_class(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ) )
UpperCAmelCase = outputs.hidden_states
UpperCAmelCase = getattr(
self.model_tester ,"expected_num_hidden_layers" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__SCREAMING_SNAKE_CASE ) ,__SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def _UpperCAmelCase ( self : Any ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
UpperCAmelCase = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = 3
UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
UpperCAmelCase = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase = True
self.check_hidden_states_output(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,(padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def _UpperCAmelCase ( self : List[str] ):
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def _UpperCAmelCase ( self : Optional[int] ):
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def _UpperCAmelCase ( self : Dict ):
pass
def _UpperCAmelCase ( self : List[str] ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(__SCREAMING_SNAKE_CASE : Union[str, Any] ):
UpperCAmelCase = 0
return t
def check_equivalence(__SCREAMING_SNAKE_CASE : Any ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : int ,__SCREAMING_SNAKE_CASE : str={} ):
with torch.no_grad():
UpperCAmelCase = model(**__SCREAMING_SNAKE_CASE ,return_dict=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE )
UpperCAmelCase = model(**__SCREAMING_SNAKE_CASE ,return_dict=__SCREAMING_SNAKE_CASE ,**__SCREAMING_SNAKE_CASE ).to_tuple()
def recursive_check(__SCREAMING_SNAKE_CASE : Tuple ,__SCREAMING_SNAKE_CASE : Union[str, Any] ):
if isinstance(__SCREAMING_SNAKE_CASE ,(List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ):
recursive_check(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() ,dict_object.values() ):
recursive_check(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(__SCREAMING_SNAKE_CASE ) ,set_nan_tensor_to_zero(__SCREAMING_SNAKE_CASE ) ,atol=1e-5 ) ,msg=(
"Tuple and dict output are not equal. Difference:"
f''' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'''
f''' {torch.isnan(__SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(__SCREAMING_SNAKE_CASE )}. Dict has'''
f''' `nan`: {torch.isnan(__SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(__SCREAMING_SNAKE_CASE )}.'''
) ,)
recursive_check(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
UpperCAmelCase = model_class(__SCREAMING_SNAKE_CASE )
model.to(__SCREAMING_SNAKE_CASE )
model.eval()
UpperCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
check_equivalence(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,return_labels=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,return_labels=__SCREAMING_SNAKE_CASE )
check_equivalence(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
UpperCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE )
check_equivalence(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,{"output_hidden_states": True} )
UpperCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,return_labels=__SCREAMING_SNAKE_CASE )
UpperCAmelCase = self._prepare_for_class(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,return_labels=__SCREAMING_SNAKE_CASE )
check_equivalence(__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE ,{"output_hidden_states": True} )
@require_torch
class __magic_name__ ( unittest.TestCase , _snake_case):
_UpperCAmelCase : Optional[Any] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
_UpperCAmelCase : Any = MaskFormerSwinConfig
def _UpperCAmelCase ( self : Union[str, Any] ):
UpperCAmelCase = MaskFormerSwinModelTester(self )
def _UpperCAmelCase ( self : Optional[int] ):
UpperCAmelCase , UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
UpperCAmelCase = backbone_class(__SCREAMING_SNAKE_CASE )
backbone.to(__SCREAMING_SNAKE_CASE )
backbone.eval()
UpperCAmelCase = backbone(**__SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps ,__SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps ,backbone.channels ):
self.assertTrue(feature_map.shape[:2] ,(batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
UpperCAmelCase = backbone(**__SCREAMING_SNAKE_CASE ,output_hidden_states=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) ,len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] ,backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) ,(batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
UpperCAmelCase = backbone(**__SCREAMING_SNAKE_CASE ,output_attentions=__SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
| 713 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ ( _a):
@require_torch
def _UpperCAmelCase ( self : Tuple ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
UpperCAmelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
UpperCAmelCase = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
BertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
BertTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
pipeline(task="fill-mask" ,model=__SCREAMING_SNAKE_CASE )
# baseline - just load from_pretrained with normal network
UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
UpperCAmelCase = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase = "1"
UpperCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE ,env=__SCREAMING_SNAKE_CASE ,check=__SCREAMING_SNAKE_CASE ,capture_output=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("success" ,result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self : Optional[int] ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
UpperCAmelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n "
UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task=\"fill-mask\", model=mname)\nprint(\"success\")\n "
UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")\nsocket.socket = offline_socket\n "
# Force fetching the files so that we can use the cache
UpperCAmelCase = "hf-internal-testing/tiny-random-bert"
BertConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
BertModel.from_pretrained(__SCREAMING_SNAKE_CASE )
BertTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
pipeline(task="fill-mask" ,model=__SCREAMING_SNAKE_CASE )
# baseline - just load from_pretrained with normal network
UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run, mock] )]
# should succeed
UpperCAmelCase = self.get_env()
UpperCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE ,env=__SCREAMING_SNAKE_CASE ,check=__SCREAMING_SNAKE_CASE ,capture_output=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("success" ,result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self : str ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
UpperCAmelCase = "\nfrom transformers import BertConfig, BertModel, BertTokenizer\n "
UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert-sharded\"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint(\"success\")\n "
UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
# baseline - just load from_pretrained with normal network
UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
UpperCAmelCase = self.get_env()
UpperCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE ,env=__SCREAMING_SNAKE_CASE ,check=__SCREAMING_SNAKE_CASE ,capture_output=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("success" ,result.stdout.decode() )
# next emulate no network
UpperCAmelCase = [sys.executable, "-c", "\n".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase = "1"
UpperCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE ,env=__SCREAMING_SNAKE_CASE ,check=__SCREAMING_SNAKE_CASE ,capture_output=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("success" ,result.stdout.decode() )
@require_torch
def _UpperCAmelCase ( self : Dict ):
UpperCAmelCase = "\nfrom transformers import pipeline\n "
UpperCAmelCase = "\nmname = \"hf-internal-testing/tiny-random-bert\"\npipe = pipeline(model=mname)\n "
UpperCAmelCase = "\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")\nsocket.socket = offline_socket\n "
UpperCAmelCase = self.get_env()
UpperCAmelCase = "1"
UpperCAmelCase = [sys.executable, "-c", "\n".join([load, mock, run] )]
UpperCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE ,env=__SCREAMING_SNAKE_CASE ,check=__SCREAMING_SNAKE_CASE ,capture_output=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
"You cannot infer task automatically within `pipeline` when using offline mode" ,result.stderr.decode().replace("\n" ,"" ) ,)
@require_torch
def _UpperCAmelCase ( self : Any ):
UpperCAmelCase = "\nfrom transformers import AutoModel\n "
UpperCAmelCase = "\nmname = \"hf-internal-testing/test_dynamic_model\"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint(\"success\")\n "
# baseline - just load from_pretrained with normal network
UpperCAmelCase = [sys.executable, "-c", "\n".join([load, run] )]
# should succeed
UpperCAmelCase = self.get_env()
UpperCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE ,env=__SCREAMING_SNAKE_CASE ,check=__SCREAMING_SNAKE_CASE ,capture_output=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("success" ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
UpperCAmelCase = "1"
UpperCAmelCase = subprocess.run(__SCREAMING_SNAKE_CASE ,env=__SCREAMING_SNAKE_CASE ,check=__SCREAMING_SNAKE_CASE ,capture_output=__SCREAMING_SNAKE_CASE )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn("success" ,result.stdout.decode() )
| 405 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class SCREAMING_SNAKE_CASE ( lowercase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = ['''image_processor''', '''tokenizer''']
SCREAMING_SNAKE_CASE__ : Any = '''LayoutLMv3ImageProcessor'''
SCREAMING_SNAKE_CASE__ : List[str] = ('''LayoutLMv3Tokenizer''', '''LayoutLMv3TokenizerFast''')
def __init__( self : int , snake_case : Dict=None , snake_case : Optional[int]=None , **snake_case : List[Any] ):
"""simple docstring"""
_snake_case : Tuple = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , snake_case , )
_snake_case : Tuple = kwargs.pop('feature_extractor' )
_snake_case : Optional[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(snake_case , snake_case )
def __call__( self : Tuple , snake_case : Optional[int] , snake_case : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , snake_case : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , snake_case : Union[List[List[int]], List[List[List[int]]]] = None , snake_case : Optional[Union[List[int], List[List[int]]]] = None , snake_case : bool = True , snake_case : Union[bool, str, PaddingStrategy] = False , snake_case : Union[bool, str, TruncationStrategy] = None , snake_case : Optional[int] = None , snake_case : int = 0 , snake_case : Optional[int] = None , snake_case : Optional[bool] = None , snake_case : Optional[bool] = None , snake_case : bool = False , snake_case : bool = False , snake_case : bool = False , snake_case : bool = False , snake_case : bool = True , snake_case : Optional[Union[str, TensorType]] = None , **snake_case : Optional[Any] , ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
# first, apply the image processor
_snake_case : Dict = self.image_processor(images=snake_case , return_tensors=snake_case )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(snake_case , snake_case ):
_snake_case : Union[str, Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
_snake_case : Optional[Any] = features['words']
_snake_case : Optional[int] = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=snake_case , add_special_tokens=snake_case , padding=snake_case , truncation=snake_case , max_length=snake_case , stride=snake_case , pad_to_multiple_of=snake_case , return_token_type_ids=snake_case , return_attention_mask=snake_case , return_overflowing_tokens=snake_case , return_special_tokens_mask=snake_case , return_offsets_mapping=snake_case , return_length=snake_case , verbose=snake_case , return_tensors=snake_case , **snake_case , )
# add pixel values
_snake_case : Optional[int] = features.pop('pixel_values' )
if return_overflowing_tokens is True:
_snake_case : List[Any] = self.get_overflowing_images(snake_case , encoded_inputs['overflow_to_sample_mapping'] )
_snake_case : Union[str, Any] = images
return encoded_inputs
def __UpperCAmelCase ( self : Union[str, Any] , snake_case : str , snake_case : Optional[Any] ):
"""simple docstring"""
_snake_case : int = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(snake_case ) != len(snake_case ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
F""" {len(snake_case )} and {len(snake_case )}""" )
return images_with_overflow
def __UpperCAmelCase ( self : Any , *snake_case : Tuple , **snake_case : Dict ):
"""simple docstring"""
return self.tokenizer.batch_decode(*snake_case , **snake_case )
def __UpperCAmelCase ( self : Dict , *snake_case : Optional[Any] , **snake_case : str ):
"""simple docstring"""
return self.tokenizer.decode(*snake_case , **snake_case )
@property
def __UpperCAmelCase ( self : Optional[int] ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def __UpperCAmelCase ( self : Any ):
"""simple docstring"""
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , snake_case , )
return self.image_processor_class
@property
def __UpperCAmelCase ( self : int ):
"""simple docstring"""
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , snake_case , )
return self.image_processor
| 517 |
'''simple docstring'''
def lowerCamelCase__ ( a__ , a__) -> float:
"""simple docstring"""
_validate_point(a__)
_validate_point(a__)
if len(a__) != len(a__):
raise ValueError('Both points must be in the same n-dimensional space')
return float(sum(abs(a - b) for a, b in zip(a__ , a__)))
def lowerCamelCase__ ( a__) -> None:
"""simple docstring"""
if point:
if isinstance(a__ , a__):
for item in point:
if not isinstance(a__ , (int, float)):
_snake_case : Any = (
'Expected a list of numbers as input, found '
F"""{type(a__).__name__}"""
)
raise TypeError(a__)
else:
_snake_case : Tuple = F"""Expected a list of numbers as input, found {type(a__).__name__}"""
raise TypeError(a__)
else:
raise ValueError('Missing an input')
def lowerCamelCase__ ( a__ , a__) -> float:
"""simple docstring"""
_validate_point(a__)
_validate_point(a__)
if len(a__) != len(a__):
raise ValueError('Both points must be in the same n-dimensional space')
return float(sum(abs(x - y) for x, y in zip(a__ , a__)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 517 | 1 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCAmelCase__( __lowercase ):
'''simple docstring'''
__snake_case = 42
class lowerCAmelCase__( __lowercase , __lowercase ):
'''simple docstring'''
@register_to_config
def __init__( self , __lowerCamelCase = 3 , __lowerCamelCase = 3 , __lowerCamelCase = ("DownEncoderBlock2D",) , __lowerCamelCase = ("UpDecoderBlock2D",) , __lowerCamelCase = (6_4,) , __lowerCamelCase = 1 , __lowerCamelCase = "silu" , __lowerCamelCase = 3 , __lowerCamelCase = 3_2 , __lowerCamelCase = 2_5_6 , __lowerCamelCase = 3_2 , __lowerCamelCase = None , __lowerCamelCase = 0.1_8215 , __lowerCamelCase = "group" , ) -> List[str]:
super().__init__()
# pass init params to Encoder
_SCREAMING_SNAKE_CASE : Tuple = Encoder(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , down_block_types=__lowerCamelCase , block_out_channels=__lowerCamelCase , layers_per_block=__lowerCamelCase , act_fn=__lowerCamelCase , norm_num_groups=__lowerCamelCase , double_z=__lowerCamelCase , )
_SCREAMING_SNAKE_CASE : Dict = vq_embed_dim if vq_embed_dim is not None else latent_channels
_SCREAMING_SNAKE_CASE : str = nn.Convad(__lowerCamelCase , __lowerCamelCase , 1 )
_SCREAMING_SNAKE_CASE : Tuple = VectorQuantizer(__lowerCamelCase , __lowerCamelCase , beta=0.25 , remap=__lowerCamelCase , sane_index_shape=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = nn.Convad(__lowerCamelCase , __lowerCamelCase , 1 )
# pass init params to Decoder
_SCREAMING_SNAKE_CASE : Dict = Decoder(
in_channels=__lowerCamelCase , out_channels=__lowerCamelCase , up_block_types=__lowerCamelCase , block_out_channels=__lowerCamelCase , layers_per_block=__lowerCamelCase , act_fn=__lowerCamelCase , norm_num_groups=__lowerCamelCase , norm_type=__lowerCamelCase , )
@apply_forward_hook
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = True ) -> VQEncoderOutput:
_SCREAMING_SNAKE_CASE : Optional[int] = self.encoder(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = self.quant_conv(__lowerCamelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__lowerCamelCase )
@apply_forward_hook
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = False , __lowerCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Optional[int] = self.quantize(__lowerCamelCase )
else:
_SCREAMING_SNAKE_CASE : Dict = h
_SCREAMING_SNAKE_CASE : str = self.post_quant_conv(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = self.decoder(__lowerCamelCase , quant if self.config.norm_type == "spatial" else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase , __lowerCamelCase = True ) -> Union[DecoderOutput, torch.FloatTensor]:
_SCREAMING_SNAKE_CASE : List[Any] = sample
_SCREAMING_SNAKE_CASE : List[str] = self.encode(__lowerCamelCase ).latents
_SCREAMING_SNAKE_CASE : List[str] = self.decode(__lowerCamelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowerCamelCase ) | 381 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class lowerCAmelCase__( __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = BertJapaneseTokenizer
__snake_case = False
__snake_case = True
def UpperCamelCase_ ( self ) -> Dict:
super().setUp()
_SCREAMING_SNAKE_CASE : Dict = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
_SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> List[str]:
_SCREAMING_SNAKE_CASE : List[Any] = "こんにちは、世界。 \nこんばんは、世界。"
_SCREAMING_SNAKE_CASE : str = "こんにちは 、 世界 。 こんばんは 、 世界 。"
return input_text, output_text
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Dict:
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : int = self.get_input_output_texts(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode(__lowerCamelCase , add_special_tokens=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = tokenizer.decode(__lowerCamelCase , clean_up_tokenization_spaces=__lowerCamelCase )
return text, ids
def UpperCamelCase_ ( self ) -> List[str]:
pass # TODO add if relevant
def UpperCamelCase_ ( self ) -> Union[str, Any]:
pass # TODO add if relevant
def UpperCamelCase_ ( self ) -> List[str]:
pass # TODO add if relevant
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : str = self.tokenizer_class(self.vocab_file )
_SCREAMING_SNAKE_CASE : int = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
self.assertListEqual(__lowerCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
self.assertIsNotNone(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = "こんにちは、世界。\nこんばんは、世界。"
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
_SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(__lowerCamelCase , "wb" ) as handle:
pickle.dump(__lowerCamelCase , __lowerCamelCase )
with open(__lowerCamelCase , "rb" ) as handle:
_SCREAMING_SNAKE_CASE : Optional[Any] = pickle.load(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = tokenizer_new.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Any = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def UpperCamelCase_ ( self ) -> str:
try:
_SCREAMING_SNAKE_CASE : List[Any] = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def UpperCamelCase_ ( self ) -> List[Any]:
try:
_SCREAMING_SNAKE_CASE : Dict = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : Dict = MecabTokenizer(do_lower_case=__lowerCamelCase , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def UpperCamelCase_ ( self ) -> Dict:
try:
_SCREAMING_SNAKE_CASE : Any = MecabTokenizer(
do_lower_case=__lowerCamelCase , normalize_text=__lowerCamelCase , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : List[str] = MecabTokenizer(normalize_text=__lowerCamelCase , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Union[str, Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : int = "こんにちは、世界。\nこんばんは、世界。"
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
_SCREAMING_SNAKE_CASE : Optional[int] = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(__lowerCamelCase , "wb" ) as handle:
pickle.dump(__lowerCamelCase , __lowerCamelCase )
with open(__lowerCamelCase , "rb" ) as handle:
_SCREAMING_SNAKE_CASE : List[str] = pickle.load(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer_new.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@require_sudachi
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Tuple = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Tuple = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def UpperCamelCase_ ( self ) -> int:
_SCREAMING_SNAKE_CASE : Tuple = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def UpperCamelCase_ ( self ) -> Optional[Any]:
_SCREAMING_SNAKE_CASE : List[str] = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : Dict = SudachiTokenizer(do_lower_case=__lowerCamelCase , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : List[Any] = SudachiTokenizer(normalize_text=__lowerCamelCase , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Union[str, Any] = SudachiTokenizer(trim_whitespace=__lowerCamelCase , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def UpperCamelCase_ ( self ) -> Dict:
_SCREAMING_SNAKE_CASE : Optional[Any] = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Union[str, Any] = "こんにちは、世界。\nこんばんは、世界。"
_SCREAMING_SNAKE_CASE : Dict = tokenizer.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [3, 1_2, 1_0, 1_4, 4, 9, 1_2, 1_0, 1_4] )
_SCREAMING_SNAKE_CASE : List[str] = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(__lowerCamelCase , "wb" ) as handle:
pickle.dump(__lowerCamelCase , __lowerCamelCase )
with open(__lowerCamelCase , "rb" ) as handle:
_SCREAMING_SNAKE_CASE : Any = pickle.load(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = tokenizer_new.tokenize(__lowerCamelCase )
self.assertListEqual(__lowerCamelCase , __lowerCamelCase )
@require_jumanpp
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : Optional[Any] = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def UpperCamelCase_ ( self ) -> Tuple:
_SCREAMING_SNAKE_CASE : List[Any] = JumanppTokenizer(do_lower_case=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Optional[int] = JumanppTokenizer(normalize_text=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : List[str] = JumanppTokenizer(trim_whitespace=__lowerCamelCase )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Tuple = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def UpperCamelCase_ ( self ) -> List[str]:
_SCREAMING_SNAKE_CASE : int = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
_SCREAMING_SNAKE_CASE : str = {}
for i, token in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : List[str] = i
_SCREAMING_SNAKE_CASE : Union[str, Any] = WordpieceTokenizer(vocab=__lowerCamelCase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def UpperCamelCase_ ( self ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : int = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
_SCREAMING_SNAKE_CASE : str = tokenizer.subword_tokenizer
_SCREAMING_SNAKE_CASE : Union[str, Any] = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
self.assertListEqual(__lowerCamelCase , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
_SCREAMING_SNAKE_CASE : Tuple = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
self.assertListEqual(__lowerCamelCase , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : Tuple = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
_SCREAMING_SNAKE_CASE : List[Any] = tokenizer.encode("ありがとう。" , add_special_tokens=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = tokenizer.encode("どういたしまして。" , add_special_tokens=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCAmelCase__( __lowercase , unittest.TestCase ):
'''simple docstring'''
__snake_case = BertJapaneseTokenizer
__snake_case = False
def UpperCamelCase_ ( self ) -> Union[str, Any]:
super().setUp()
_SCREAMING_SNAKE_CASE : List[str] = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
_SCREAMING_SNAKE_CASE : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
def UpperCamelCase_ ( self , **__lowerCamelCase ) -> List[Any]:
return BertJapaneseTokenizer.from_pretrained(self.tmpdirname , subword_tokenizer_type="character" , **__lowerCamelCase )
def UpperCamelCase_ ( self , __lowerCamelCase ) -> Optional[int]:
_SCREAMING_SNAKE_CASE : Optional[int] = "こんにちは、世界。 \nこんばんは、世界。"
_SCREAMING_SNAKE_CASE : Dict = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
return input_text, output_text
def UpperCamelCase_ ( self ) -> Tuple:
pass # TODO add if relevant
def UpperCamelCase_ ( self ) -> Union[str, Any]:
pass # TODO add if relevant
def UpperCamelCase_ ( self ) -> int:
pass # TODO add if relevant
def UpperCamelCase_ ( self ) -> Any:
_SCREAMING_SNAKE_CASE : int = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
_SCREAMING_SNAKE_CASE : Any = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
self.assertListEqual(
__lowerCamelCase , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCamelCase ) , [3, 4, 5, 6, 7, 1_1, 9, 1_0, 1_2, 3, 4, 8, 4, 7, 1_1, 9, 1_0, 1_2] )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
_SCREAMING_SNAKE_CASE : Optional[Any] = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
_SCREAMING_SNAKE_CASE : Union[str, Any] = {}
for i, token in enumerate(__lowerCamelCase ):
_SCREAMING_SNAKE_CASE : int = i
_SCREAMING_SNAKE_CASE : List[Any] = CharacterTokenizer(vocab=__lowerCamelCase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : Optional[int] = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
_SCREAMING_SNAKE_CASE : int = tokenizer.encode("ありがとう。" , add_special_tokens=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Any = tokenizer.encode("どういたしまして。" , add_special_tokens=__lowerCamelCase )
_SCREAMING_SNAKE_CASE : Dict = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase )
_SCREAMING_SNAKE_CASE : str = tokenizer.build_inputs_with_special_tokens(__lowerCamelCase , __lowerCamelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> List[Any]:
_SCREAMING_SNAKE_CASE : str = "cl-tohoku/bert-base-japanese"
_SCREAMING_SNAKE_CASE : List[Any] = AutoTokenizer.from_pretrained(__lowerCamelCase )
self.assertIsInstance(__lowerCamelCase , __lowerCamelCase )
class lowerCAmelCase__( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self ) -> str:
_SCREAMING_SNAKE_CASE : Optional[Any] = "cl-tohoku/bert-base-japanese"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertTokenizer.from_pretrained(__lowerCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
_SCREAMING_SNAKE_CASE : Optional[Any] = "bert-base-cased"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertJapaneseTokenizer.from_pretrained(__lowerCamelCase )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) ) | 381 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class _a ( unittest.TestCase):
"""simple docstring"""
def __init__( self : int , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any]=7 , __UpperCamelCase : Optional[Any]=3 , __UpperCamelCase : Optional[Any]=1_0 , __UpperCamelCase : List[Any]=1_8 , __UpperCamelCase : Union[str, Any]=3_0 , __UpperCamelCase : Dict=4_0_0 , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : str=None , __UpperCamelCase : Any=True , __UpperCamelCase : Any=[0.5, 0.5, 0.5] , __UpperCamelCase : Any=[0.5, 0.5, 0.5] , __UpperCamelCase : Optional[Any]=None , )->Optional[int]:
_UpperCAmelCase = size if size is not None else {'''shortest_edge''': 1_8}
_UpperCAmelCase = crop_size if crop_size is not None else {'''height''': 1_8, '''width''': 1_8}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = num_frames
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
_UpperCAmelCase = crop_size
def lowercase__ ( self : Optional[Any] )->str:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class _a ( lowerCAmelCase , unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = VivitImageProcessor if is_vision_available() else None
def lowercase__ ( self : List[Any] )->Optional[Any]:
_UpperCAmelCase = VivitImageProcessingTester(self )
@property
def lowercase__ ( self : List[str] )->Any:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase__ ( self : Tuple )->Union[str, Any]:
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , '''image_mean''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''image_std''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''do_normalize''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''do_resize''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''do_center_crop''' ) )
self.assertTrue(hasattr(__UpperCamelCase , '''size''' ) )
def lowercase__ ( self : str )->Optional[int]:
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'''shortest_edge''': 1_8} )
self.assertEqual(image_processor.crop_size , {'''height''': 1_8, '''width''': 1_8} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 , crop_size=8_4 )
self.assertEqual(image_processor.size , {'''shortest_edge''': 4_2} )
self.assertEqual(image_processor.crop_size , {'''height''': 8_4, '''width''': 8_4} )
def lowercase__ ( self : Union[str, Any] )->Dict:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL videos
_UpperCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for video in video_inputs:
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertIsInstance(video[0] , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase__ ( self : Tuple )->List[str]:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for video in video_inputs:
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertIsInstance(video[0] , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
def lowercase__ ( self : Union[str, Any] )->Dict:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_video_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for video in video_inputs:
self.assertIsInstance(__UpperCamelCase , __UpperCamelCase )
self.assertIsInstance(video[0] , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(video_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
_UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
| 602 |
"""simple docstring"""
def lowercase ( _SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
_UpperCAmelCase = int(_SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase , _UpperCAmelCase = divmod(_SCREAMING_SNAKE_CASE , 2 )
return binary_recursive(_SCREAMING_SNAKE_CASE ) + str(_SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
_UpperCAmelCase = str(_SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError('''No input value was provided''' )
_UpperCAmelCase = '''-''' if number.startswith('''-''' ) else ''''''
_UpperCAmelCase = number.lstrip('''-''' )
if not number.isnumeric():
raise ValueError('''Input value is not an integer''' )
return f'{negative}0b{binary_recursive(int(_SCREAMING_SNAKE_CASE ) )}'
if __name__ == "__main__":
from doctest import testmod
testmod()
| 602 | 1 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
snake_case_ : Any = 1_6
snake_case_ : List[Any] = 3_2
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Accelerator , SCREAMING_SNAKE_CASE_ : int = 1_6 ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = AutoTokenizer.from_pretrained("bert-base-cased" )
SCREAMING_SNAKE_CASE_ : str = load_dataset("glue" , "mrpc" )
def tokenize_function(SCREAMING_SNAKE_CASE_ : List[str] ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE_ : List[str] = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE_ : List[Any] = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=["idx", "sentence1", "sentence2"] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE_ : Dict = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(SCREAMING_SNAKE_CASE_ : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE_ : Dict = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE_ : Any = 1_6
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE_ : Dict = 8
else:
SCREAMING_SNAKE_CASE_ : List[Any] = None
return tokenizer.pad(
SCREAMING_SNAKE_CASE_ , padding="longest" , max_length=SCREAMING_SNAKE_CASE_ , pad_to_multiple_of=SCREAMING_SNAKE_CASE_ , return_tensors="pt" , )
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE_ : List[Any] = DataLoader(
tokenized_datasets["train"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoader(
tokenized_datasets["validation"] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
snake_case_ : Optional[int] = mocked_dataloaders # noqa: F811
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : str ) -> Optional[int]:
"""simple docstring"""
if os.environ.get("TESTING_MOCKED_DATALOADERS" , SCREAMING_SNAKE_CASE_ ) == "1":
SCREAMING_SNAKE_CASE_ : Dict = 2
# New Code #
SCREAMING_SNAKE_CASE_ : Optional[Any] = int(args.gradient_accumulation_steps )
SCREAMING_SNAKE_CASE_ : str = int(args.local_sgd_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE_ : Optional[Any] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=SCREAMING_SNAKE_CASE_ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError("LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)" )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE_ : Optional[int] = config["lr"]
SCREAMING_SNAKE_CASE_ : Optional[int] = int(config["num_epochs"] )
SCREAMING_SNAKE_CASE_ : Optional[int] = int(config["seed"] )
SCREAMING_SNAKE_CASE_ : Dict = int(config["batch_size"] )
SCREAMING_SNAKE_CASE_ : Tuple = evaluate.load("glue" , "mrpc" )
set_seed(SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Dict = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE_ : int = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=SCREAMING_SNAKE_CASE_ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE_ : Any = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE_ : Tuple = AdamW(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
# Instantiate scheduler
SCREAMING_SNAKE_CASE_ : Any = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=1_0_0 , num_training_steps=(len(SCREAMING_SNAKE_CASE_ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE_ : int = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE_ ):
model.train()
with LocalSGD(
accelerator=SCREAMING_SNAKE_CASE_ , model=SCREAMING_SNAKE_CASE_ , local_sgd_steps=SCREAMING_SNAKE_CASE_ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(SCREAMING_SNAKE_CASE_ ):
SCREAMING_SNAKE_CASE_ : int = model(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Dict = output.loss
accelerator.backward(SCREAMING_SNAKE_CASE_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[int] = model(**SCREAMING_SNAKE_CASE_ )
SCREAMING_SNAKE_CASE_ : Any = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , )
SCREAMING_SNAKE_CASE_ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}:" , SCREAMING_SNAKE_CASE_ )
def __lowerCamelCase ( ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Tuple = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
# New Code #
parser.add_argument(
"--gradient_accumulation_steps" , type=SCREAMING_SNAKE_CASE_ , default=1 , help="The number of minibatches to be ran before gradients are accumulated." , )
parser.add_argument(
"--local_sgd_steps" , type=SCREAMING_SNAKE_CASE_ , default=8 , help="Number of local SGD steps or None to disable local SGD" )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
SCREAMING_SNAKE_CASE_ : Tuple = parser.parse_args()
SCREAMING_SNAKE_CASE_ : Any = {"lr": 2E-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main()
| 712 |
'''simple docstring'''
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
snake_case_ = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--original_config_file',
type=str,
required=True,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--image_size',
default=5_1_2,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
def __lowerCamelCase ( SCREAMING_SNAKE_CASE_ : Optional[int] ) -> Dict:
"""simple docstring"""
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"could not parse string as bool {string}" )
parser.add_argument(
'--use_linear_projection', help='Override for use linear projection', required=False, type=parse_bool
)
parser.add_argument('--cross_attention_dim', help='Override for cross attention_dim', required=False, type=int)
snake_case_ = parser.parse_args()
snake_case_ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 68 | 0 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class a_ ( A__ , unittest.TestCase ):
A = MvpTokenizer
A = MvpTokenizerFast
A = True
A = filter_roberta_detectors
def A_( self ) -> int:
"""simple docstring"""
super().setUp()
SCREAMING_SNAKE_CASE_ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
SCREAMING_SNAKE_CASE_ = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
SCREAMING_SNAKE_CASE_ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
SCREAMING_SNAKE_CASE_ = {'unk_token': '<unk>'}
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_lowerCamelCase ) )
def A_( self , **SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def A_( self , **SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **_lowerCamelCase )
def A_( self , SCREAMING_SNAKE_CASE ) -> Any:
"""simple docstring"""
return "lower newer", "lower newer"
@cached_property
def A_( self ) -> Tuple:
"""simple docstring"""
return MvpTokenizer.from_pretrained('RUCAIBox/mvp' )
@cached_property
def A_( self ) -> Optional[int]:
"""simple docstring"""
return MvpTokenizerFast.from_pretrained('RUCAIBox/mvp' )
@require_torch
def A_( self ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
SCREAMING_SNAKE_CASE_ = [0, 250, 251, 1_7818, 13, 3_9186, 1938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ = tokenizer(_lowerCamelCase , max_length=len(_lowerCamelCase ) , padding=_lowerCamelCase , return_tensors='pt' )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE_ = batch.input_ids.tolist()[0]
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
# Test that special tokens are reset
@require_torch
def A_( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ = tokenizer(_lowerCamelCase , padding=_lowerCamelCase , return_tensors='pt' )
# check if input_ids are returned and no labels
self.assertIn('input_ids' , _lowerCamelCase )
self.assertIn('attention_mask' , _lowerCamelCase )
self.assertNotIn('labels' , _lowerCamelCase )
self.assertNotIn('decoder_attention_mask' , _lowerCamelCase )
@require_torch
def A_( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = [
'Summary of the text.',
'Another summary.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ = tokenizer(text_target=_lowerCamelCase , max_length=32 , padding='max_length' , return_tensors='pt' )
self.assertEqual(32 , targets['input_ids'].shape[1] )
@require_torch
def A_( self ) -> Dict:
"""simple docstring"""
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ = tokenizer(
['I am a small frog' * 1024, 'I am a small frog'] , padding=_lowerCamelCase , truncation=_lowerCamelCase , return_tensors='pt' )
self.assertIsInstance(_lowerCamelCase , _lowerCamelCase )
self.assertEqual(batch.input_ids.shape , (2, 1024) )
@require_torch
def A_( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = ['A long paragraph for summarization.']
SCREAMING_SNAKE_CASE_ = [
'Summary of the text.',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ = tokenizer(_lowerCamelCase , text_target=_lowerCamelCase , return_tensors='pt' )
SCREAMING_SNAKE_CASE_ = inputs['input_ids']
SCREAMING_SNAKE_CASE_ = inputs['labels']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def A_( self ) -> int:
"""simple docstring"""
pass
def A_( self ) -> List[Any]:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ = self.rust_tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = self.tokenizer_class.from_pretrained(_lowerCamelCase , **_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = 'A, <mask> AllenNLP sentence.'
SCREAMING_SNAKE_CASE_ = tokenizer_r.encode_plus(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_token_type_ids=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = tokenizer_p.encode_plus(_lowerCamelCase , add_special_tokens=_lowerCamelCase , return_token_type_ids=_lowerCamelCase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['token_type_ids'] ) , sum(tokens_p['token_type_ids'] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['attention_mask'] ) / len(tokens_r['attention_mask'] ) , sum(tokens_p['attention_mask'] ) / len(tokens_p['attention_mask'] ) , )
SCREAMING_SNAKE_CASE_ = tokenizer_r.convert_ids_to_tokens(tokens_r['input_ids'] )
SCREAMING_SNAKE_CASE_ = tokenizer_p.convert_ids_to_tokens(tokens_p['input_ids'] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r['input_ids'] , [0, 250, 6, 5_0264, 3823, 487, 2_1992, 3645, 4, 2] )
self.assertSequenceEqual(
_lowerCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
self.assertSequenceEqual(
_lowerCamelCase , ['<s>', 'A', ',', '<mask>', 'ĠAllen', 'N', 'LP', 'Ġsentence', '.', '</s>'] )
| 205 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class a__ ( A__ ):
UpperCAmelCase__ = ''''''
UpperCAmelCase__ = '''hf-legacy''' # "hf://"" is reserved for hffs
def __init__( self :Dict , _lowerCamelCase :Optional[DatasetInfo] = None , _lowerCamelCase :Optional[str] = None , **_lowerCamelCase :Tuple , ):
'''simple docstring'''
super().__init__(self , **_lowerCamelCase )
UpperCamelCase_ : List[str] =repo_info
UpperCamelCase_ : Any =token
UpperCamelCase_ : Tuple =None
def lowerCamelCase_ ( self :Dict ):
'''simple docstring'''
if self.dir_cache is None:
UpperCamelCase_ : Any ={}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
UpperCamelCase_ : Optional[Any] ={
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(_lowerCamelCase ): {'name': str(_lowerCamelCase ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCamelCase_ ( self :Union[str, Any] , _lowerCamelCase :str , _lowerCamelCase :str = "rb" , **_lowerCamelCase :str , ):
'''simple docstring'''
if not isinstance(self.repo_info , _lowerCamelCase ):
raise NotImplementedError(f'''Open is only implemented for dataset repositories, but got {self.repo_info}''' )
UpperCamelCase_ : List[Any] =hf_hub_url(self.repo_info.id , _lowerCamelCase , revision=self.repo_info.sha )
return fsspec.open(
_lowerCamelCase , mode=_lowerCamelCase , headers=get_authentication_headers_for_url(_lowerCamelCase , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :Tuple , **_lowerCamelCase :Any ):
'''simple docstring'''
self._get_dirs()
UpperCamelCase_ : Tuple =self._strip_protocol(_lowerCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(_lowerCamelCase )
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :List[str] , _lowerCamelCase :List[Any]=False , **_lowerCamelCase :Any ):
'''simple docstring'''
self._get_dirs()
UpperCamelCase_ : str =PurePosixPath(path.strip('/' ) )
UpperCamelCase_ : List[str] ={}
for p, f in self.dir_cache.items():
UpperCamelCase_ : List[Any] =PurePosixPath(p.strip('/' ) )
UpperCamelCase_ : Tuple =p.parent
if root == path:
UpperCamelCase_ : int =f
UpperCamelCase_ : Optional[int] =list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
| 357 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
__SCREAMING_SNAKE_CASE = StableDiffusionInpaintPipeline
__SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
__SCREAMING_SNAKE_CASE = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
__SCREAMING_SNAKE_CASE = frozenset([] )
def A ( self : Optional[Any] ):
"""simple docstring"""
torch.manual_seed(0 )
__snake_case = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=9 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a_ , )
__snake_case = PNDMScheduler(skip_prk_steps=a_ )
torch.manual_seed(0 )
__snake_case = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__snake_case = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act="gelu" , projection_dim=512 , )
__snake_case = CLIPTextModel(a_ )
__snake_case = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
__snake_case = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def A ( self : Tuple , a_ : int , a_ : Optional[int]=0 ):
"""simple docstring"""
__snake_case = floats_tensor((1, 3, 32, 32) , rng=random.Random(a_ ) ).to(a_ )
__snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
__snake_case = Image.fromarray(np.uinta(a_ ) ).convert("RGB" ).resize((64, 64) )
__snake_case = Image.fromarray(np.uinta(image + 4 ) ).convert("RGB" ).resize((64, 64) )
if str(a_ ).startswith("mps" ):
__snake_case = torch.manual_seed(a_ )
else:
__snake_case = torch.Generator(device=a_ ).manual_seed(a_ )
__snake_case = {
"prompt": "A painting of a squirrel eating a burger",
"image": init_image,
"mask_image": mask_image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def A ( self : List[Any] ):
"""simple docstring"""
__snake_case = "cpu" # ensure determinism for the device-dependent torch.Generator
__snake_case = self.get_dummy_components()
__snake_case = StableDiffusionInpaintPipeline(**a_ )
__snake_case = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
__snake_case = self.get_dummy_inputs(a_ )
__snake_case = sd_pipe(**a_ ).images
__snake_case = image[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__snake_case = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A ( self : List[Any] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
def A ( self : Dict ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A ( self : Dict ):
"""simple docstring"""
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
__snake_case = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench.npy" )
__snake_case = "stabilityai/stable-diffusion-2-inpainting"
__snake_case = StableDiffusionInpaintPipeline.from_pretrained(a_ , safety_checker=a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
__snake_case = "Face of a yellow cat, high resolution, sitting on a park bench"
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , image=a_ , mask_image=a_ , generator=a_ , output_type="np" , )
__snake_case = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 9e-3
def A ( self : Any ):
"""simple docstring"""
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
__snake_case = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
"/yellow_cat_sitting_on_a_park_bench_fp16.npy" )
__snake_case = "stabilityai/stable-diffusion-2-inpainting"
__snake_case = StableDiffusionInpaintPipeline.from_pretrained(
a_ , torch_dtype=torch.floataa , safety_checker=a_ , )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing()
__snake_case = "Face of a yellow cat, high resolution, sitting on a park bench"
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , image=a_ , mask_image=a_ , generator=a_ , output_type="np" , )
__snake_case = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def A ( self : List[Any] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-inpaint/init_image.png" )
__snake_case = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png" )
__snake_case = "stabilityai/stable-diffusion-2-inpainting"
__snake_case = PNDMScheduler.from_pretrained(a_ , subfolder="scheduler" )
__snake_case = StableDiffusionInpaintPipeline.from_pretrained(
a_ , safety_checker=a_ , scheduler=a_ , torch_dtype=torch.floataa , )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__snake_case = "Face of a yellow cat, high resolution, sitting on a park bench"
__snake_case = torch.manual_seed(0 )
__snake_case = pipe(
prompt=a_ , image=a_ , mask_image=a_ , generator=a_ , num_inference_steps=2 , output_type="np" , )
__snake_case = torch.cuda.max_memory_allocated()
# make sure that less than 2.65 GB is allocated
assert mem_bytes < 2.65 * 10**9
| 680 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : str = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 680 | 1 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
A_ : List[str] = yaml.safe_load(
"\\nname: \"\"\nallow_empty: false\nallow_empty_text: true\nsubsections:\n - name: \"Dataset Card for X\" # First-level markdown heading\n allow_empty: false\n allow_empty_text: true\n subsections:\n - name: \"Table of Contents\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Dataset Description\"\n allow_empty: false\n allow_empty_text: false\n subsections:\n - name: \"Dataset Summary\"\n allow_empty: false\n allow_empty_text: false\n subsections: null\n - name: \"Supported Tasks and Leaderboards\"\n allow_empty: true\n allow_empty_text: true\n subsections: null\n - name: Languages\n allow_empty: false\n allow_empty_text: true\n subsections: null\n"
)
A_ : List[str] = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
A_ : Any = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : str = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n#### Extra Ignored Subsection\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : Optional[Any] = {
"name": "root",
"text": "",
"is_empty_text": True,
"subsections": [
{
"name": "Dataset Card for My Dataset",
"text": "",
"is_empty_text": True,
"subsections": [
{"name": "Table of Contents", "text": "Some text here.", "is_empty_text": False, "subsections": []},
{
"name": "Dataset Description",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Dataset Summary",
"text": "Some text here.",
"is_empty_text": False,
"subsections": [
{
"name": "Extra Ignored Subsection",
"text": "",
"is_empty_text": True,
"subsections": [],
}
],
},
{
"name": "Supported Tasks and Leaderboards",
"text": "",
"is_empty_text": True,
"subsections": [],
},
{"name": "Languages", "text": "Language Text", "is_empty_text": False, "subsections": []},
],
},
],
}
],
}
A_ : int = "\\n---\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : List[str] = (
"The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README."
)
A_ : Union[str, Any] = "\\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : int = (
"The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README."
)
A_ : Optional[int] = "\\n---\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : Any = "The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README."
A_ : Tuple = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : Any = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored)."
A_ : Optional[int] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n"
A_ : Any = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found 'None'."
A_ : Union[str, Any] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Languages\nLanguage Text\n"
A_ : List[Any] = "The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`."
A_ : Tuple = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\n"
A_ : str = "The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty."
A_ : List[str] = "\\n---\nlanguage:\n- zh\n- en\n---\n\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : Dict = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README."
A_ : Any = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n# Dataset Card My Dataset\n"
A_ : str = "The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README."
A_ : Dict = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : List[str] = "The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README."
A_ : List[str] = ""
A_ : Union[str, Any] = "The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README."
A_ : Tuple = "\\n---\nlanguage:\n- zh\n- en\n---\n\n# Dataset Card for My Dataset\n# Dataset Card for My Dataset\n## Table of Contents\nSome text here.\n## Dataset Description\nSome text here.\n### Dataset Summary\nSome text here.\n### Supported Tasks and Leaderboards\n### Languages\nLanguage Text\n"
A_ : Tuple = "The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections."
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def UpperCamelCase__ ( __magic_name__ : Tuple , __magic_name__ : Optional[int] ) -> Optional[Any]:
'''simple docstring'''
assert ReadMe.from_string(__magic_name__ , __magic_name__ ).to_dict() == expected_dict
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def UpperCamelCase__ ( __magic_name__ : int , __magic_name__ : List[Any] ) -> Any:
'''simple docstring'''
with pytest.raises(__magic_name__ , match=re.escape(expected_error.format(path="""root""" ) ) ):
snake_case__ : Optional[Any] = ReadMe.from_string(__magic_name__ , __magic_name__ )
readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def UpperCamelCase__ ( __magic_name__ : Union[str, Any] , __magic_name__ : Optional[Any] ) -> Optional[int]:
'''simple docstring'''
with pytest.raises(__magic_name__ , match=re.escape(expected_error.format(path="""root""" ) ) ):
ReadMe.from_string(__magic_name__ , __magic_name__ )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def UpperCamelCase__ ( __magic_name__ : Optional[Any] ) -> str:
'''simple docstring'''
ReadMe.from_string(__magic_name__ , __magic_name__ , suppress_parsing_errors=__magic_name__ )
@pytest.mark.parametrize(
"""readme_md, expected_dict""" , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def UpperCamelCase__ ( __magic_name__ : Optional[Any] , __magic_name__ : Optional[Any] ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Dict = Path(__magic_name__ ) / """README.md"""
with open(__magic_name__ , """w+""" ) as readme_file:
readme_file.write(__magic_name__ )
snake_case__ : Optional[Any] = ReadMe.from_readme(__magic_name__ , __magic_name__ ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def UpperCamelCase__ ( __magic_name__ : Tuple , __magic_name__ : Any ) -> Tuple:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Any = Path(__magic_name__ ) / """README.md"""
with open(__magic_name__ , """w+""" ) as readme_file:
readme_file.write(__magic_name__ )
snake_case__ : Tuple = expected_error.format(path=__magic_name__ )
with pytest.raises(__magic_name__ , match=re.escape(__magic_name__ ) ):
snake_case__ : Tuple = ReadMe.from_readme(__magic_name__ , __magic_name__ )
readme.validate()
@pytest.mark.parametrize(
"""readme_md, expected_error""" , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def UpperCamelCase__ ( __magic_name__ : Tuple , __magic_name__ : Tuple ) -> int:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : Any = Path(__magic_name__ ) / """README.md"""
with open(__magic_name__ , """w+""" ) as readme_file:
readme_file.write(__magic_name__ )
snake_case__ : List[str] = expected_error.format(path=__magic_name__ )
with pytest.raises(__magic_name__ , match=re.escape(__magic_name__ ) ):
ReadMe.from_readme(__magic_name__ , __magic_name__ )
@pytest.mark.parametrize(
"""readme_md,""" , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def UpperCamelCase__ ( __magic_name__ : List[str] ) -> List[Any]:
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmp_dir:
snake_case__ : int = Path(__magic_name__ ) / """README.md"""
with open(__magic_name__ , """w+""" ) as readme_file:
readme_file.write(__magic_name__ )
ReadMe.from_readme(__magic_name__ , __magic_name__ , suppress_parsing_errors=__magic_name__ )
| 38 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
SCREAMING_SNAKE_CASE__ : Optional[Any] = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class a_ :
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=14 , SCREAMING_SNAKE_CASE=10 , SCREAMING_SNAKE_CASE=19 , SCREAMING_SNAKE_CASE=5 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=[1, 2, 3, 4, 5] , SCREAMING_SNAKE_CASE=25 , SCREAMING_SNAKE_CASE=5 , ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = d_model
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = prediction_length
SCREAMING_SNAKE_CASE_ = context_length
SCREAMING_SNAKE_CASE_ = cardinality
SCREAMING_SNAKE_CASE_ = num_time_features
SCREAMING_SNAKE_CASE_ = lags_sequence
SCREAMING_SNAKE_CASE_ = embedding_dimension
SCREAMING_SNAKE_CASE_ = is_training
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = context_length
SCREAMING_SNAKE_CASE_ = prediction_length + label_length
SCREAMING_SNAKE_CASE_ = label_length
SCREAMING_SNAKE_CASE_ = moving_average
SCREAMING_SNAKE_CASE_ = autocorrelation_factor
def A_( self ) -> List[str]:
"""simple docstring"""
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def A_( self , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = config.context_length + max(config.lags_sequence )
SCREAMING_SNAKE_CASE_ = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, _past_length] )
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, config.prediction_length] )
SCREAMING_SNAKE_CASE_ = {
'past_values': past_values,
'static_categorical_features': static_categorical_features,
'past_time_features': past_time_features,
'past_observed_mask': past_observed_mask,
'future_time_features': future_time_features,
'future_values': future_values,
}
return inputs_dict
def A_( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.get_config()
SCREAMING_SNAKE_CASE_ = self.prepare_autoformer_inputs_dict(SCREAMING_SNAKE_CASE )
return config, inputs_dict
def A_( self ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
return config, inputs_dict
def A_( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = AutoformerModel(config=SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE ).eval()
SCREAMING_SNAKE_CASE_ = model(**SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = outputs.encoder_last_hidden_state
SCREAMING_SNAKE_CASE_ = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = model.get_encoder()
encoder.save_pretrained(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = AutoformerEncoder.from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model.create_network_inputs(**SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
SCREAMING_SNAKE_CASE_ = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
SCREAMING_SNAKE_CASE_ = encoder(inputs_embeds=SCREAMING_SNAKE_CASE )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
SCREAMING_SNAKE_CASE_ = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
SCREAMING_SNAKE_CASE_ = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
SCREAMING_SNAKE_CASE_ = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
SCREAMING_SNAKE_CASE_ = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE_ = model.get_decoder()
decoder.save_pretrained(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = AutoformerDecoder.from_pretrained(SCREAMING_SNAKE_CASE ).to(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = decoder(
trend=SCREAMING_SNAKE_CASE , inputs_embeds=SCREAMING_SNAKE_CASE , encoder_hidden_states=SCREAMING_SNAKE_CASE , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class a_ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
A = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
A = (AutoformerForPrediction,) if is_torch_available() else ()
A = {'''feature-extraction''': AutoformerModel} if is_torch_available() else {}
A = False
A = False
A = False
A = False
A = False
A = False
def A_( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = AutoformerModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , has_text_modality=SCREAMING_SNAKE_CASE )
def A_( self ) -> int:
"""simple docstring"""
self.config_tester.run_common_tests()
def A_( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = model_class.from_pretrained(SCREAMING_SNAKE_CASE , output_loading_info=SCREAMING_SNAKE_CASE )
self.assertEqual(info['missing_keys'] , [] )
def A_( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*SCREAMING_SNAKE_CASE )
@unittest.skip(reason='Model has no tokens embeddings' )
def A_( self ) -> Union[str, Any]:
"""simple docstring"""
pass
def A_( self ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = inspect.signature(getattr(SCREAMING_SNAKE_CASE , 'forward' ) )
# The main input is the name of the argument after `self`
SCREAMING_SNAKE_CASE_ = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , SCREAMING_SNAKE_CASE )
def A_( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = [
'past_values',
'past_time_features',
'past_observed_mask',
'static_categorical_features',
'static_real_features',
'future_values',
'future_time_features',
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append('future_observed_mask' )
expected_arg_names.extend(
[
'decoder_attention_mask',
'head_mask',
'decoder_head_mask',
'cross_attn_head_mask',
'encoder_outputs',
'past_key_values',
'output_hidden_states',
'output_attentions',
'use_cache',
'return_dict',
] )
self.assertListEqual(arg_names[: len(SCREAMING_SNAKE_CASE )] , SCREAMING_SNAKE_CASE )
def A_( self ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'seq_length' , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'decoder_seq_length' , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'encoder_seq_length' , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'd_model' , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = getattr(self.model_tester , 'num_attention_heads' , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = d_model // num_attention_heads
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ = outputs.encoder_attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
SCREAMING_SNAKE_CASE_ = len(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
# decoder attentions
SCREAMING_SNAKE_CASE_ = outputs.decoder_attentions
self.assertIsInstance(SCREAMING_SNAKE_CASE , (list, tuple) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
SCREAMING_SNAKE_CASE_ = outputs.cross_attentions
self.assertIsInstance(SCREAMING_SNAKE_CASE , (list, tuple) )
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = model_class(SCREAMING_SNAKE_CASE )
model.to(SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + 2 , len(SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def A_( self ) -> Any:
"""simple docstring"""
super().test_retain_grad_hidden_states_attentions()
def lowercase ( SCREAMING_SNAKE_CASE="train-batch.pt" ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE_ = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch' , filename=SCREAMING_SNAKE_CASE , repo_type='dataset' )
SCREAMING_SNAKE_CASE_ = torch.load(SCREAMING_SNAKE_CASE , map_location=SCREAMING_SNAKE_CASE )
return batch
@require_torch
@slow
class a_ ( unittest.TestCase ):
def A_( self ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = prepare_batch()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(
past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , future_values=batch['future_values'] , future_time_features=batch['future_time_features'] , )[0]
SCREAMING_SNAKE_CASE_ = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(output[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) )
def A_( self ) -> Dict:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = prepare_batch('val-batch.pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(
past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , ).encoder_last_hidden_state
SCREAMING_SNAKE_CASE_ = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=SCREAMING_SNAKE_CASE )
self.assertTrue(torch.allclose(output[0, :3, :3] , SCREAMING_SNAKE_CASE , atol=SCREAMING_SNAKE_CASE ) )
def A_( self ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = prepare_batch('val-batch.pt' )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model.generate(
static_categorical_features=batch['static_categorical_features'] , past_time_features=batch['past_time_features'] , past_values=batch['past_values'] , future_time_features=batch['future_time_features'] , past_observed_mask=batch['past_observed_mask'] , )
SCREAMING_SNAKE_CASE_ = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = torch.tensor([3_1_3_0.6_7_6_3, 4_0_5_6.5_2_9_3, 7_0_5_3.0_7_8_6] , device=SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ = outputs.sequences.mean(dim=1 )
self.assertTrue(torch.allclose(mean_prediction[0, -3:] , SCREAMING_SNAKE_CASE , rtol=1e-1 ) )
| 205 | 0 |
__snake_case : Optional[Any] ='\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
__snake_case : Tuple =[{'type': 'code', 'content': INSTALL_CONTENT}]
__snake_case : Any ={
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 90 |
def lowerCAmelCase__ ( lowerCamelCase_ : int):
'''simple docstring'''
if not isinstance(lowerCamelCase_ ,lowerCamelCase_):
lowerCAmelCase__ : Dict = f"""Input value of [number={number}] must be an integer"""
raise TypeError(lowerCamelCase_)
if number < 1:
lowerCAmelCase__ : Any = f"""Input value of [number={number}] must be > 0"""
raise ValueError(lowerCamelCase_)
lowerCAmelCase__ : List[str] = 1
for i in range(1 ,lowerCamelCase_):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 90 | 1 |
'''simple docstring'''
A__: Optional[int] = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
A__: List[Any] = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
A__: int = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 694 |
'''simple docstring'''
from __future__ import annotations
class A__ :
def __init__( self :str , SCREAMING_SNAKE_CASE :int ) -> None:
'''simple docstring'''
_a : int =order
# a_{0} ... a_{k}
_a : Optional[Any] =[1.0] + [0.0] * order
# b_{0} ... b_{k}
_a : Tuple =[1.0] + [0.0] * order
# x[n-1] ... x[n-k]
_a : List[Any] =[0.0] * self.order
# y[n-1] ... y[n-k]
_a : Tuple =[0.0] * self.order
def __UpperCAmelCase ( self :int , SCREAMING_SNAKE_CASE :list[float] , SCREAMING_SNAKE_CASE :list[float] ) -> None:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE ) < self.order:
_a : int =[1.0, *a_coeffs]
if len(SCREAMING_SNAKE_CASE ) != self.order + 1:
_a : int =(
f"Expected a_coeffs to have {self.order + 1} elements "
f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}"
)
raise ValueError(SCREAMING_SNAKE_CASE )
if len(SCREAMING_SNAKE_CASE ) != self.order + 1:
_a : Optional[Any] =(
f"Expected b_coeffs to have {self.order + 1} elements "
f"for {self.order}-order filter, got {len(SCREAMING_SNAKE_CASE )}"
)
raise ValueError(SCREAMING_SNAKE_CASE )
_a : List[str] =a_coeffs
_a : Union[str, Any] =b_coeffs
def __UpperCAmelCase ( self :List[Any] , SCREAMING_SNAKE_CASE :float ) -> float:
'''simple docstring'''
_a : str =0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
_a : Any =(result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
_a : str =self.input_history[:-1]
_a : Optional[Any] =self.output_history[:-1]
_a : Optional[int] =sample
_a : Tuple =result
return result
| 694 | 1 |
import string
def _lowerCAmelCase ( A__: str ):
'''simple docstring'''
UpperCAmelCase = ''''''
for i in sequence:
UpperCAmelCase = ord(A__ )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def _lowerCAmelCase ( A__: str ):
'''simple docstring'''
UpperCAmelCase = string.ascii_letters
UpperCAmelCase = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(A__ )] if c in letters else c for c in sequence )
def _lowerCAmelCase ( ):
'''simple docstring'''
from timeit import timeit
print('''Running performance benchmarks...''' )
UpperCAmelCase = '''from string import printable ; from __main__ import atbash, atbash_slow'''
print(F"""> atbash_slow(): {timeit('atbash_slow(printable)' , setup=A__ )} seconds""" )
print(F"""> atbash(): {timeit('atbash(printable)' , setup=A__ )} seconds""" )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f'''{example} encrypted in atbash: {atbash(example)}''')
benchmark()
| 391 |
from math import factorial
def _lowerCAmelCase ( A__: int , A__: int ):
'''simple docstring'''
if n < k or k < 0:
raise ValueError('''Please enter positive integers for n and k where n >= k''' )
return factorial(A__ ) // (factorial(A__ ) * factorial(n - k ))
if __name__ == "__main__":
print(
"The number of five-card hands possible from a standard",
f'''fifty-two card deck is: {combinations(52, 5)}\n''',
)
print(
"If a class of 40 students must be arranged into groups of",
f'''4 for group projects, there are {combinations(40, 4)} ways''',
"to arrange them.\n",
)
print(
"If 10 teams are competing in a Formula One race, there",
f'''are {combinations(10, 3)} ways that first, second and''',
"third place can be awarded.",
)
| 391 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
_snake_case : List[Any] = logging.get_logger(__name__)
_snake_case : str = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'adapter_layer': 'encoder.layers.*.adapter_layer',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'lm_head',
'mask_emb': 'masked_spec_embed',
'pooling_layer.linear': 'projector',
'pooling_layer.projection': 'classifier',
}
_snake_case : Tuple = [
'lm_head',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
'projector',
'classifier',
]
def _A ( __snake_case :Any ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {}
with open(__snake_case , "r" ) as file:
for line_number, line in enumerate(__snake_case ):
__SCREAMING_SNAKE_CASE = line.strip()
if line:
__SCREAMING_SNAKE_CASE = line.split()
__SCREAMING_SNAKE_CASE = line_number
__SCREAMING_SNAKE_CASE = words[0]
__SCREAMING_SNAKE_CASE = value
return result
def _A ( __snake_case :Dict , __snake_case :str , __snake_case :Any , __snake_case :int , __snake_case :str ) -> Tuple:
"""simple docstring"""
for attribute in key.split("." ):
__SCREAMING_SNAKE_CASE = getattr(__snake_case , __snake_case )
__SCREAMING_SNAKE_CASE = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__snake_case ):
__SCREAMING_SNAKE_CASE = PARAM_MAPPING[full_name.split("." )[-1]]
__SCREAMING_SNAKE_CASE = "param"
if weight_type is not None and weight_type != "param":
__SCREAMING_SNAKE_CASE = getattr(__snake_case , __snake_case ).shape
elif weight_type is not None and weight_type == "param":
__SCREAMING_SNAKE_CASE = hf_pointer
for attribute in hf_param_name.split("." ):
__SCREAMING_SNAKE_CASE = getattr(__snake_case , __snake_case )
__SCREAMING_SNAKE_CASE = shape_pointer.shape
# let's reduce dimension
__SCREAMING_SNAKE_CASE = value[0]
else:
__SCREAMING_SNAKE_CASE = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f'''Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}''' )
if weight_type == "weight":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_g":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_v":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "bias":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "param":
for attribute in hf_param_name.split("." ):
__SCREAMING_SNAKE_CASE = getattr(__snake_case , __snake_case )
__SCREAMING_SNAKE_CASE = value
else:
__SCREAMING_SNAKE_CASE = value
logger.info(f'''{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.''' )
def _A ( __snake_case :Optional[Any] , __snake_case :Union[str, Any] , __snake_case :Tuple , __snake_case :str , __snake_case :Dict ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__snake_case ):
__SCREAMING_SNAKE_CASE = PARAM_MAPPING[full_name.split("." )[-1]]
__SCREAMING_SNAKE_CASE = "param"
if weight_type is not None and weight_type != "param":
__SCREAMING_SNAKE_CASE = ".".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__SCREAMING_SNAKE_CASE = ".".join([key, hf_param_name] )
else:
__SCREAMING_SNAKE_CASE = key
__SCREAMING_SNAKE_CASE = value if "lm_head" in full_key else value[0]
_snake_case : List[str] = {
'W_a': 'linear_1.weight',
'W_b': 'linear_2.weight',
'b_a': 'linear_1.bias',
'b_b': 'linear_2.bias',
'ln_W': 'norm.weight',
'ln_b': 'norm.bias',
}
def _A ( __snake_case :Any , __snake_case :Optional[Any] , __snake_case :Tuple=None , __snake_case :int=None ) -> Optional[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = False
for key, mapped_key in MAPPING.items():
__SCREAMING_SNAKE_CASE = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
__SCREAMING_SNAKE_CASE = True
if "*" in mapped_key:
__SCREAMING_SNAKE_CASE = name.split(__snake_case )[0].split("." )[-2]
__SCREAMING_SNAKE_CASE = mapped_key.replace("*" , __snake_case )
if "weight_g" in name:
__SCREAMING_SNAKE_CASE = "weight_g"
elif "weight_v" in name:
__SCREAMING_SNAKE_CASE = "weight_v"
elif "bias" in name:
__SCREAMING_SNAKE_CASE = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__SCREAMING_SNAKE_CASE = "weight"
else:
__SCREAMING_SNAKE_CASE = None
if hf_dict is not None:
rename_dict(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
else:
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
return is_used
return is_used
def _A ( __snake_case :Optional[int] , __snake_case :str , __snake_case :Optional[Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = fairseq_model.state_dict()
__SCREAMING_SNAKE_CASE = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__SCREAMING_SNAKE_CASE = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == "group" , )
__SCREAMING_SNAKE_CASE = True
else:
__SCREAMING_SNAKE_CASE = load_wavaveca_layer(__snake_case , __snake_case , __snake_case )
if not is_used:
unused_weights.append(__snake_case )
logger.warning(f'''Unused weights: {unused_weights}''' )
def _A ( __snake_case :Union[str, Any] , __snake_case :int , __snake_case :List[Any] , __snake_case :List[str] , __snake_case :int ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = full_name.split("conv_layers." )[-1]
__SCREAMING_SNAKE_CASE = name.split("." )
__SCREAMING_SNAKE_CASE = int(items[0] )
__SCREAMING_SNAKE_CASE = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
__SCREAMING_SNAKE_CASE = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
__SCREAMING_SNAKE_CASE = value
logger.info(f'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
__SCREAMING_SNAKE_CASE = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f'''{full_name} has size {value.shape}, but'''
f''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
__SCREAMING_SNAKE_CASE = value
logger.info(f'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def _A ( __snake_case :List[str] , __snake_case :List[Any] , __snake_case :Union[str, Any]=None , __snake_case :Tuple=None , __snake_case :Tuple=True , __snake_case :Union[str, Any]=False ) -> Tuple:
"""simple docstring"""
if config_path is not None:
__SCREAMING_SNAKE_CASE = WavaVecaConfig.from_pretrained(__snake_case )
else:
__SCREAMING_SNAKE_CASE = WavaVecaConfig()
if is_seq_class:
__SCREAMING_SNAKE_CASE = read_txt_into_dict(__snake_case )
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = WavaVecaForSequenceClassification(__snake_case )
__SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
feature_extractor.save_pretrained(__snake_case )
elif is_finetuned:
if dict_path:
__SCREAMING_SNAKE_CASE = Dictionary.load(__snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__SCREAMING_SNAKE_CASE = target_dict.pad_index
__SCREAMING_SNAKE_CASE = target_dict.bos_index
__SCREAMING_SNAKE_CASE = target_dict.eos_index
__SCREAMING_SNAKE_CASE = len(target_dict.symbols )
__SCREAMING_SNAKE_CASE = os.path.join(__snake_case , "vocab.json" )
if not os.path.isdir(__snake_case ):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(__snake_case ) )
return
os.makedirs(__snake_case , exist_ok=__snake_case )
__SCREAMING_SNAKE_CASE = target_dict.indices
# fairseq has the <pad> and <s> switched
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1
with open(__snake_case , "w" , encoding="utf-8" ) as vocab_handle:
json.dump(__snake_case , __snake_case )
__SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer(
__snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=__snake_case , )
__SCREAMING_SNAKE_CASE = True if config.feat_extract_norm == "layer" else False
__SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
__SCREAMING_SNAKE_CASE = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
processor.save_pretrained(__snake_case )
__SCREAMING_SNAKE_CASE = WavaVecaForCTC(__snake_case )
else:
__SCREAMING_SNAKE_CASE = WavaVecaForPreTraining(__snake_case )
if is_finetuned or is_seq_class:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
else:
__SCREAMING_SNAKE_CASE = argparse.Namespace(task="audio_pretraining" )
__SCREAMING_SNAKE_CASE = fairseq.tasks.setup_task(__snake_case )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__snake_case )
__SCREAMING_SNAKE_CASE = model[0].eval()
recursively_load_weights(__snake_case , __snake_case , not is_finetuned )
hf_wavavec.save_pretrained(__snake_case )
if __name__ == "__main__":
_snake_case : Tuple = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
parser.add_argument(
'--is_seq_class',
action='store_true',
help='Whether the model to convert is a fine-tuned sequence classification model or not',
)
_snake_case : Tuple = parser.parse_args()
_snake_case : Any = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
| 693 |
def _A ( __snake_case :int = 400_0000 ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(__snake_case )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = b, a + b
return sum(__snake_case )
if __name__ == "__main__":
print(F"""{solution() = }""")
| 693 | 1 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
UpperCAmelCase__ : Dict = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"
UpperCAmelCase__ : Optional[int] = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
UpperCAmelCase__ : List[str] = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n"
def A ( snake_case__ : List[str] , snake_case__ : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
return float((preds == labels).mean() )
def A ( snake_case__ : Tuple , snake_case__ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
__snake_case = simple_accuracy(snake_case__ , snake_case__ )
__snake_case = float(fa_score(y_true=snake_case__ , y_pred=snake_case__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def A ( snake_case__ : List[Any] , snake_case__ : Any ) -> str:
'''simple docstring'''
__snake_case = np.array(snake_case__ )
__snake_case = np.array(snake_case__ )
__snake_case = en_sentvecs.shape[0]
# mean centering
__snake_case = en_sentvecs - np.mean(snake_case__ , axis=0 )
__snake_case = in_sentvecs - np.mean(snake_case__ , axis=0 )
__snake_case = cdist(snake_case__ , snake_case__ , 'cosine' )
__snake_case = np.array(range(snake_case__ ) )
__snake_case = sim.argsort(axis=1 )[:, :10]
__snake_case = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
def _a ( self) -> Union[str, Any]:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
'references': datasets.Value('int64')
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32')),
}) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , )
def _a ( self , lowercase_ , lowercase_) -> List[str]:
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(lowercase_ , lowercase_)}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(lowercase_ , lowercase_)
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowercase_ , lowercase_)}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]')
| 700 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase__ : Any = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowercase ( lowerCamelCase__ ):
def __init__( self , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ , ) -> List[str]:
super().__init__()
if safety_checker is None:
logger.warning(
F"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'
' results in services or applications open to the public. Both the diffusers team and Hugging Face'
' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'
' it only for use-cases that involve analyzing network behavior or auditing its results. For more'
' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .')
self.register_modules(
speech_model=lowercase_ , speech_processor=lowercase_ , vae=lowercase_ , text_encoder=lowercase_ , tokenizer=lowercase_ , unet=lowercase_ , scheduler=lowercase_ , feature_extractor=lowercase_ , )
def _a ( self , lowercase_ = "auto") -> Union[str, Any]:
if slice_size == "auto":
__snake_case = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowercase_)
def _a ( self) -> Any:
self.enable_attention_slicing(lowercase_)
@torch.no_grad()
def __call__( self , lowercase_ , lowercase_=1_6_0_0_0 , lowercase_ = 5_1_2 , lowercase_ = 5_1_2 , lowercase_ = 5_0 , lowercase_ = 7.5 , lowercase_ = None , lowercase_ = 1 , lowercase_ = 0.0 , lowercase_ = None , lowercase_ = None , lowercase_ = "pil" , lowercase_ = True , lowercase_ = None , lowercase_ = 1 , **lowercase_ , ) -> List[str]:
__snake_case = self.speech_processor.feature_extractor(
lowercase_ , return_tensors='pt' , sampling_rate=lowercase_).input_features.to(self.device)
__snake_case = self.speech_model.generate(lowercase_ , max_length=4_8_0_0_0_0)
__snake_case = self.speech_processor.tokenizer.batch_decode(lowercase_ , skip_special_tokens=lowercase_ , normalize=lowercase_)[
0
]
if isinstance(lowercase_ , lowercase_):
__snake_case = 1
elif isinstance(lowercase_ , lowercase_):
__snake_case = len(lowercase_)
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(lowercase_)}")
if height % 8 != 0 or width % 8 != 0:
raise ValueError(F"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowercase_ , lowercase_) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(lowercase_)}.")
# get prompt text embeddings
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=self.tokenizer.model_max_length , return_tensors='pt' , )
__snake_case = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__snake_case = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
F" {self.tokenizer.model_max_length} tokens: {removed_text}")
__snake_case = text_input_ids[:, : self.tokenizer.model_max_length]
__snake_case = self.text_encoder(text_input_ids.to(self.device))[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__snake_case , __snake_case , __snake_case = text_embeddings.shape
__snake_case = text_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = text_embeddings.view(bs_embed * num_images_per_prompt , lowercase_ , -1)
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__snake_case = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__snake_case = 42
if negative_prompt is None:
__snake_case = [''] * batch_size
elif type(lowercase_) is not type(lowercase_):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(lowercase_)} !="
F" {type(lowercase_)}.")
elif isinstance(lowercase_ , lowercase_):
__snake_case = [negative_prompt]
elif batch_size != len(lowercase_):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(lowercase_)}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.')
else:
__snake_case = negative_prompt
__snake_case = text_input_ids.shape[-1]
__snake_case = self.tokenizer(
lowercase_ , padding='max_length' , max_length=lowercase_ , truncation=lowercase_ , return_tensors='pt' , )
__snake_case = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__snake_case = uncond_embeddings.shape[1]
__snake_case = uncond_embeddings.repeat(1 , lowercase_ , 1)
__snake_case = uncond_embeddings.view(batch_size * num_images_per_prompt , lowercase_ , -1)
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__snake_case = torch.cat([uncond_embeddings, text_embeddings])
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__snake_case = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__snake_case = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device='cpu' , dtype=lowercase_).to(
self.device)
else:
__snake_case = torch.randn(lowercase_ , generator=lowercase_ , device=self.device , dtype=lowercase_)
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
__snake_case = latents.to(self.device)
# set timesteps
self.scheduler.set_timesteps(lowercase_)
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__snake_case = self.scheduler.timesteps.to(self.device)
# scale the initial noise by the standard deviation required by the scheduler
__snake_case = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__snake_case = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
__snake_case = {}
if accepts_eta:
__snake_case = eta
for i, t in enumerate(self.progress_bar(lowercase_)):
# expand the latents if we are doing classifier free guidance
__snake_case = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
__snake_case = self.scheduler.scale_model_input(lowercase_ , lowercase_)
# predict the noise residual
__snake_case = self.unet(lowercase_ , lowercase_ , encoder_hidden_states=lowercase_).sample
# perform guidance
if do_classifier_free_guidance:
__snake_case , __snake_case = noise_pred.chunk(2)
__snake_case = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__snake_case = self.scheduler.step(lowercase_ , lowercase_ , lowercase_ , **lowercase_).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowercase_ , lowercase_ , lowercase_)
__snake_case = 1 / 0.1_8215 * latents
__snake_case = self.vae.decode(lowercase_).sample
__snake_case = (image / 2 + 0.5).clamp(0 , 1)
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__snake_case = image.cpu().permute(0 , 2 , 3 , 1).float().numpy()
if output_type == "pil":
__snake_case = self.numpy_to_pil(lowercase_)
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=lowercase_ , nsfw_content_detected=lowercase_)
| 676 | 0 |
'''simple docstring'''
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"""split_dict""" , [
SplitDict(),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1337 , num_examples=42 , dataset_name="""my_dataset""" )} ),
SplitDict({"""train""": SplitInfo(name="""train""" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"""train""": SplitInfo()} ),
] , )
def lowerCamelCase ( __lowerCamelCase : SplitDict ) ->Optional[Any]:
_SCREAMING_SNAKE_CASE = split_dict._to_yaml_list()
assert len(__lowerCamelCase ) == len(__lowerCamelCase )
_SCREAMING_SNAKE_CASE = SplitDict._from_yaml_list(__lowerCamelCase )
for split_name, split_info in split_dict.items():
# dataset_name field is deprecated, and is therefore not part of the YAML dump
_SCREAMING_SNAKE_CASE = None
# the split name of split_dict takes over the name of the split info object
_SCREAMING_SNAKE_CASE = split_name
assert split_dict == reloaded
@pytest.mark.parametrize(
"""split_info""" , [SplitInfo(), SplitInfo(dataset_name=__lowerCamelCase ), SplitInfo(dataset_name="""my_dataset""" )] )
def lowerCamelCase ( __lowerCamelCase : Dict ) ->str:
# For backward compatibility, we need asdict(split_dict) to return split info dictrionaries with the "dataset_name"
# field even if it's deprecated. This way old versionso of `datasets` can still reload dataset_infos.json files
_SCREAMING_SNAKE_CASE = asdict(SplitDict({"""train""": split_info} ) )
assert "dataset_name" in split_dict_asdict["train"]
assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 314 |
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class a_ ( snake_case_ ):
'''simple docstring'''
UpperCamelCase = 42
UpperCamelCase = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 314 | 1 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _SCREAMING_SNAKE_CASE :
def __init__( self , lowercase , lowercase=13 , lowercase=7 , lowercase=True , lowercase=True , lowercase=True , lowercase=True , lowercase=99 , lowercase=16 , lowercase=36 , lowercase=6 , lowercase=6 , lowercase=6 , lowercase=37 , lowercase="gelu" , lowercase=0.1 , lowercase=0.1 , lowercase=512 , lowercase=16 , lowercase=2 , lowercase=0.0_2 , lowercase=3 , lowercase=4 , lowercase=None , ) -> List[str]:
lowerCamelCase_ = parent
lowerCamelCase_ = batch_size
lowerCamelCase_ = seq_length
lowerCamelCase_ = is_training
lowerCamelCase_ = use_input_mask
lowerCamelCase_ = use_token_type_ids
lowerCamelCase_ = use_labels
lowerCamelCase_ = vocab_size
lowerCamelCase_ = embedding_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_hidden_groups
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = type_sequence_label_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = num_labels
lowerCamelCase_ = num_choices
lowerCamelCase_ = scope
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase_ = None
if self.use_input_mask:
lowerCamelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
lowerCamelCase_ = None
if self.use_token_type_ids:
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase_ = None
lowerCamelCase_ = None
lowerCamelCase_ = None
if self.use_labels:
lowerCamelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase_ = ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase_ = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE_( self ) -> int:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]:
lowerCamelCase_ = AlbertModel(config=__A )
model.to(__A )
model.eval()
lowerCamelCase_ = model(__A , attention_mask=__A , token_type_ids=__A )
lowerCamelCase_ = model(__A , token_type_ids=__A )
lowerCamelCase_ = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Dict:
lowerCamelCase_ = AlbertForPreTraining(config=__A )
model.to(__A )
model.eval()
lowerCamelCase_ = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , sentence_order_label=__A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> List[str]:
lowerCamelCase_ = AlbertForMaskedLM(config=__A )
model.to(__A )
model.eval()
lowerCamelCase_ = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Optional[Any]:
lowerCamelCase_ = AlbertForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
lowerCamelCase_ = model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> str:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = AlbertForSequenceClassification(__A )
model.to(__A )
model.eval()
lowerCamelCase_ = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
lowerCamelCase_ = self.num_labels
lowerCamelCase_ = AlbertForTokenClassification(config=__A )
model.to(__A )
model.eval()
lowerCamelCase_ = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase , lowercase ) -> Tuple:
lowerCamelCase_ = self.num_choices
lowerCamelCase_ = AlbertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
lowerCamelCase_ = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCamelCase_ = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
lowerCamelCase_ = self.prepare_config_and_inputs()
(
lowerCamelCase_
) = config_and_inputs
lowerCamelCase_ = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class _SCREAMING_SNAKE_CASE ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
lowerCAmelCase__ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ = (
{
'feature-extraction': AlbertModel,
'fill-mask': AlbertForMaskedLM,
'question-answering': AlbertForQuestionAnswering,
'text-classification': AlbertForSequenceClassification,
'token-classification': AlbertForTokenClassification,
'zero-shot': AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ = True
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=False ) -> Optional[int]:
lowerCamelCase_ = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class in get_values(__A ):
lowerCamelCase_ = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A )
lowerCamelCase_ = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = AlbertModelTester(self )
lowerCamelCase_ = ConfigTester(self , config_class=__A , hidden_size=37 )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def SCREAMING_SNAKE_CASE_( self ) -> Dict:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def SCREAMING_SNAKE_CASE_( self ) -> Optional[Any]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def SCREAMING_SNAKE_CASE_( self ) -> str:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
def SCREAMING_SNAKE_CASE_( self ) -> List[str]:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def SCREAMING_SNAKE_CASE_( self ) -> Any:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def SCREAMING_SNAKE_CASE_( self ) -> int:
lowerCamelCase_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCamelCase_ = type
self.model_tester.create_and_check_model(*__A )
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Any:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase_ = AlbertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_torch
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_( self ) -> Union[str, Any]:
lowerCamelCase_ = AlbertModel.from_pretrained("albert-base-v2" )
lowerCamelCase_ = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCamelCase_ = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCamelCase_ = model(__A , attention_mask=__A )[0]
lowerCamelCase_ = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , __A )
lowerCamelCase_ = torch.tensor(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1e-4 ) )
| 703 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__A ='''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
__A ='''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
__A ='''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _SCREAMING_SNAKE_CASE ( datasets.Metric ):
def SCREAMING_SNAKE_CASE_( self ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ),
"references": datasets.Sequence(
datasets.Sequence(datasets.Value("string" , id="token" ) , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"] , reference_urls=[
"https://en.wikipedia.org/wiki/BLEU",
"https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213",
] , )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase=4 , lowercase=False ) -> int:
lowerCamelCase_ = compute_bleu(
reference_corpus=lowercase , translation_corpus=lowercase , max_order=lowercase , smooth=lowercase )
((lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_) , (lowerCamelCase_)) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 313 | 0 |
"""simple docstring"""
a__ : Tuple = """\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"""
a__ : List[Any] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
a__ : Optional[int] = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 589 |
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'huggingface/time-series-transformer-tourism-monthly': (
'https://huggingface.co/huggingface/time-series-transformer-tourism-monthly/resolve/main/config.json'
),
# See all TimeSeriesTransformer models at https://huggingface.co/models?filter=time_series_transformer
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Union[str, Any] = '''time_series_transformer'''
_snake_case : str = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
'''num_hidden_layers''': '''encoder_layers''',
}
def __init__( self , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = "student_t" , _UpperCamelCase = "nll" , _UpperCamelCase = 1 , _UpperCamelCase = [1, 2, 3, 4, 5, 6, 7] , _UpperCamelCase = "mean" , _UpperCamelCase = 0 , _UpperCamelCase = 0 , _UpperCamelCase = 0 , _UpperCamelCase = 0 , _UpperCamelCase = None , _UpperCamelCase = None , _UpperCamelCase = 3_2 , _UpperCamelCase = 3_2 , _UpperCamelCase = 2 , _UpperCamelCase = 2 , _UpperCamelCase = 2 , _UpperCamelCase = 2 , _UpperCamelCase = True , _UpperCamelCase = "gelu" , _UpperCamelCase = 6_4 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 0.1 , _UpperCamelCase = 1_0_0 , _UpperCamelCase = 0.02 , _UpperCamelCase=True , **_UpperCamelCase , ) -> str:
# time series specific configuration
UpperCAmelCase_ : Optional[Any] = prediction_length
UpperCAmelCase_ : List[str] = context_length or prediction_length
UpperCAmelCase_ : List[str] = distribution_output
UpperCAmelCase_ : List[Any] = loss
UpperCAmelCase_ : Tuple = input_size
UpperCAmelCase_ : int = num_time_features
UpperCAmelCase_ : List[Any] = lags_sequence
UpperCAmelCase_ : str = scaling
UpperCAmelCase_ : List[str] = num_dynamic_real_features
UpperCAmelCase_ : Optional[Any] = num_static_real_features
UpperCAmelCase_ : int = num_static_categorical_features
if cardinality and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'The cardinality should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase_ : List[Any] = cardinality
else:
UpperCAmelCase_ : Optional[Any] = [0]
if embedding_dimension and num_static_categorical_features > 0:
if len(_UpperCamelCase ) != num_static_categorical_features:
raise ValueError(
'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
UpperCAmelCase_ : Optional[Any] = embedding_dimension
else:
UpperCAmelCase_ : Optional[int] = [min(5_0 , (cat + 1) // 2 ) for cat in self.cardinality]
UpperCAmelCase_ : Optional[Any] = num_parallel_samples
# Transformer architecture configuration
UpperCAmelCase_ : List[Any] = input_size * len(_UpperCamelCase ) + self._number_of_features
UpperCAmelCase_ : Tuple = d_model
UpperCAmelCase_ : List[Any] = encoder_attention_heads
UpperCAmelCase_ : Tuple = decoder_attention_heads
UpperCAmelCase_ : Union[str, Any] = encoder_ffn_dim
UpperCAmelCase_ : Optional[int] = decoder_ffn_dim
UpperCAmelCase_ : Any = encoder_layers
UpperCAmelCase_ : List[str] = decoder_layers
UpperCAmelCase_ : List[str] = dropout
UpperCAmelCase_ : Tuple = attention_dropout
UpperCAmelCase_ : Optional[int] = activation_dropout
UpperCAmelCase_ : str = encoder_layerdrop
UpperCAmelCase_ : Optional[int] = decoder_layerdrop
UpperCAmelCase_ : Optional[int] = activation_function
UpperCAmelCase_ : str = init_std
UpperCAmelCase_ : int = use_cache
super().__init__(is_encoder_decoder=_UpperCamelCase , **_UpperCamelCase )
@property
def __UpperCAmelCase ( self ) -> int:
return (
sum(self.embedding_dimension )
+ self.num_dynamic_real_features
+ self.num_time_features
+ self.num_static_real_features
+ self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
)
| 406 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__SCREAMING_SNAKE_CASE : Dict = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE : Dict = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class lowerCamelCase_( A__ ):
'''simple docstring'''
lowercase__ : List[str] = 'swin2sr'
lowercase__ : Tuple = {
'hidden_size': 'embed_dim',
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self , lowerCamelCase__=6_4 , lowerCamelCase__=1 , lowerCamelCase__=3 , lowerCamelCase__=1_8_0 , lowerCamelCase__=[6, 6, 6, 6, 6, 6] , lowerCamelCase__=[6, 6, 6, 6, 6, 6] , lowerCamelCase__=8 , lowerCamelCase__=2.0 , lowerCamelCase__=True , lowerCamelCase__=0.0 , lowerCamelCase__=0.0 , lowerCamelCase__=0.1 , lowerCamelCase__="gelu" , lowerCamelCase__=False , lowerCamelCase__=0.0_2 , lowerCamelCase__=1e-5 , lowerCamelCase__=2 , lowerCamelCase__=1.0 , lowerCamelCase__="1conv" , lowerCamelCase__="pixelshuffle" , **lowerCamelCase__ , ):
super().__init__(**lowerCamelCase__ )
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = embed_dim
_lowerCamelCase = depths
_lowerCamelCase = len(lowerCamelCase__ )
_lowerCamelCase = num_heads
_lowerCamelCase = window_size
_lowerCamelCase = mlp_ratio
_lowerCamelCase = qkv_bias
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = drop_path_rate
_lowerCamelCase = hidden_act
_lowerCamelCase = use_absolute_embeddings
_lowerCamelCase = layer_norm_eps
_lowerCamelCase = initializer_range
_lowerCamelCase = upscale
_lowerCamelCase = img_range
_lowerCamelCase = resi_connection
_lowerCamelCase = upsampler
| 717 |
"""simple docstring"""
from __future__ import annotations
import copy
import inspect
import json
import math
import os
import tempfile
import unittest
from importlib import import_module
import numpy as np
from transformers import ViTMAEConfig
from transformers.file_utils import cached_property, is_tf_available, is_vision_available
from transformers.testing_utils import require_tf, require_vision, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTMAEForPreTraining, TFViTMAEModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase_:
'''simple docstring'''
def __init__( self , lowerCamelCase__ , lowerCamelCase__=1_3 , lowerCamelCase__=3_0 , lowerCamelCase__=2 , lowerCamelCase__=3 , lowerCamelCase__=True , lowerCamelCase__=True , lowerCamelCase__=3_2 , lowerCamelCase__=2 , lowerCamelCase__=4 , lowerCamelCase__=3_7 , lowerCamelCase__="gelu" , lowerCamelCase__=0.1 , lowerCamelCase__=0.1 , lowerCamelCase__=1_0 , lowerCamelCase__=0.0_2 , lowerCamelCase__=3 , lowerCamelCase__=0.6 , lowerCamelCase__=None , ):
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = mask_ratio
_lowerCamelCase = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
_lowerCamelCase = (image_size // patch_size) ** 2
_lowerCamelCase = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def snake_case__ ( self ):
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def snake_case__ ( self ):
return ViTMAEConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , decoder_hidden_size=self.hidden_size , decoder_num_hidden_layers=self.num_hidden_layers , decoder_num_attention_heads=self.num_attention_heads , decoder_intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCamelCase__ , initializer_range=self.initializer_range , mask_ratio=self.mask_ratio , )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEModel(config=lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
# expected sequence length = num_patches
_lowerCamelCase = (self.image_size // self.patch_size) ** 2
_lowerCamelCase = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
_lowerCamelCase = 1
_lowerCamelCase = TFViTMAEForPreTraining(lowerCamelCase__ )
_lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase = model(lowerCamelCase__ , training=lowerCamelCase__ )
_lowerCamelCase = self.patch_size**2
self.parent.assertEqual(result.logits.shape , (self.batch_size, num_patches, expected_num_channels) )
def snake_case__ ( self ):
_lowerCamelCase = self.prepare_config_and_inputs()
((_lowerCamelCase) , (_lowerCamelCase) , (_lowerCamelCase)) = config_and_inputs
_lowerCamelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowerCamelCase_( A__, A__, unittest.TestCase ):
'''simple docstring'''
lowercase__ : Optional[Any] = (TFViTMAEModel, TFViTMAEForPreTraining) if is_tf_available() else ()
lowercase__ : Dict = {'feature-extraction': TFViTMAEModel} if is_tf_available() else {}
lowercase__ : Optional[Any] = False
lowercase__ : Union[str, Any] = False
lowercase__ : str = False
lowercase__ : List[str] = False
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModelTester(self )
_lowerCamelCase = ConfigTester(self , config_class=lowerCamelCase__ , has_text_modality=lowerCamelCase__ , hidden_size=3_7 )
def snake_case__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViTMAE does not use inputs_embeds''' )
def snake_case__ ( self ):
pass
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
_lowerCamelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase__ , tf.keras.layers.Layer ) )
def snake_case__ ( self ):
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase__ )
def snake_case__ ( self ):
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*lowerCamelCase__ )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = copy.deepcopy(self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ ) )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = outputs_dict[0].numpy()
_lowerCamelCase = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords ) ) , 1e-6 )
def snake_case__ ( self ):
# make the mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
def prepare_numpy_arrays(lowerCamelCase__ ):
_lowerCamelCase = {}
for k, v in inputs_dict.items():
if tf.is_tensor(lowerCamelCase__ ):
_lowerCamelCase = v.numpy()
else:
_lowerCamelCase = np.array(lowerCamelCase__ )
return inputs_np_dict
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = prepare_numpy_arrays(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# make masks reproducible
np.random.seed(2 )
_lowerCamelCase = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.constant(lowerCamelCase__ )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
_lowerCamelCase = tf_noise
super().check_pt_tf_models(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = {
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__ ),)
for module_member_name in dir(lowerCamelCase__ )
if module_member_name.endswith('''MainLayer''' )
# This condition is required, since `modeling_tf_clip.py` has 3 classes whose names end with `MainLayer`.
and module_member_name[: -len('''MainLayer''' )] == model_class.__name__[: -len('''Model''' )]
for module_member in (getattr(lowerCamelCase__ , lowerCamelCase__ ),)
if isinstance(lowerCamelCase__ , lowerCamelCase__ )
and tf.keras.layers.Layer in module_member.__bases__
and getattr(lowerCamelCase__ , '''_keras_serializable''' , lowerCamelCase__ )
}
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
_lowerCamelCase = tf.convert_to_tensor(lowerCamelCase__ )
inputs_dict.update({'''noise''': noise} )
for main_layer_class in tf_main_layer_classes:
_lowerCamelCase = main_layer_class(lowerCamelCase__ )
_lowerCamelCase = {
name: tf.keras.Input(tensor.shape[1:] , dtype=tensor.dtype ) for name, tensor in inputs_dict.items()
}
_lowerCamelCase = tf.keras.Model(lowerCamelCase__ , outputs=main_layer(lowerCamelCase__ ) )
_lowerCamelCase = model(lowerCamelCase__ )
with tempfile.TemporaryDirectory() as tmpdirname:
_lowerCamelCase = os.path.join(lowerCamelCase__ , '''keras_model.h5''' )
model.save(lowerCamelCase__ )
_lowerCamelCase = tf.keras.models.load_model(
lowerCamelCase__ , custom_objects={main_layer_class.__name__: main_layer_class} )
assert isinstance(lowerCamelCase__ , tf.keras.Model )
_lowerCamelCase = model(lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@slow
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = outputs.last_hidden_state.numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = outputs.logits.numpy()
_lowerCamelCase = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ , saved_model=lowerCamelCase__ )
_lowerCamelCase = model_class.from_pretrained(lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
if model_class.__name__ == "TFViTMAEModel":
_lowerCamelCase = after_outputs['''last_hidden_state'''].numpy()
_lowerCamelCase = 0
else:
_lowerCamelCase = after_outputs['''logits'''].numpy()
_lowerCamelCase = 0
_lowerCamelCase = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(lowerCamelCase__ , 1e-5 )
def snake_case__ ( self ):
# make mask reproducible
np.random.seed(2 )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = int((config.image_size // config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(lowerCamelCase__ )
_lowerCamelCase = self._prepare_for_class(lowerCamelCase__ , lowerCamelCase__ )
_lowerCamelCase = model(lowerCamelCase__ , noise=lowerCamelCase__ )
_lowerCamelCase = model.get_config()
# make sure that returned config is jsonifiable, which is required by keras
json.dumps(lowerCamelCase__ )
_lowerCamelCase = model_class.from_config(model.get_config() )
# make sure it also accepts a normal config
_lowerCamelCase = model_class.from_config(model.config )
_lowerCamelCase = new_model(lowerCamelCase__ ) # Build model
new_model.set_weights(model.get_weights() )
_lowerCamelCase = new_model(lowerCamelCase__ , noise=lowerCamelCase__ )
self.assert_outputs_same(lowerCamelCase__ , lowerCamelCase__ )
@unittest.skip(
reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.''' )
def snake_case__ ( self ):
pass
@unittest.skip(reason='''ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load''' )
def snake_case__ ( self ):
pass
@slow
def snake_case__ ( self ):
_lowerCamelCase = TFViTMAEModel.from_pretrained('''google/vit-base-patch16-224''' )
self.assertIsNotNone(lowerCamelCase__ )
def lowerCAmelCase_( ) -> List[Any]:
_lowerCamelCase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowerCamelCase_( unittest.TestCase ):
'''simple docstring'''
@cached_property
def snake_case__ ( self ):
return ViTImageProcessor.from_pretrained('''facebook/vit-mae-base''' ) if is_vision_available() else None
@slow
def snake_case__ ( self ):
# make random mask reproducible across the PT and TF model
np.random.seed(2 )
_lowerCamelCase = TFViTMAEForPreTraining.from_pretrained('''facebook/vit-mae-base''' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=lowerCamelCase__ , return_tensors='''tf''' )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
_lowerCamelCase = ViTMAEConfig()
_lowerCamelCase = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
_lowerCamelCase = np.random.uniform(size=(1, num_patches) )
# forward pass
_lowerCamelCase = model(**lowerCamelCase__ , noise=lowerCamelCase__ )
# verify the logits
_lowerCamelCase = tf.convert_to_tensor([1, 1_9_6, 7_6_8] )
self.assertEqual(outputs.logits.shape , lowerCamelCase__ )
_lowerCamelCase = tf.convert_to_tensor(
[[-0.0_5_4_8, -1.7_0_2_3, -0.9_3_2_5], [0.3_7_2_1, -0.5_6_7_0, -0.2_2_3_3], [0.8_2_3_5, -1.3_8_7_8, -0.3_5_2_4]] )
tf.debugging.assert_near(outputs.logits[0, :3, :3] , lowerCamelCase__ , atol=1e-4 )
| 623 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : int ) -> Any:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __magic_name__ ( self : Tuple ) -> Any:
SCREAMING_SNAKE_CASE__ : Dict =1
SCREAMING_SNAKE_CASE__ : str =3
SCREAMING_SNAKE_CASE__ : Union[str, Any] =(32, 32)
SCREAMING_SNAKE_CASE__ : Union[str, Any] =floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__lowercase )
return image
@property
def __magic_name__ ( self : Tuple ) -> int:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] =UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def __magic_name__ ( self : Optional[Any] ) -> Any:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any =AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def __magic_name__ ( self : str ) -> Any:
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] =RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=50_06 , )
return RobertaSeriesModelWithTransformation(__lowercase )
@property
def __magic_name__ ( self : List[Any] ) -> Optional[int]:
def extract(*__lowercase : Optional[Any] , **__lowercase : List[Any] ):
class __SCREAMING_SNAKE_CASE :
def __init__( self : Optional[int] ) -> str:
SCREAMING_SNAKE_CASE__ : Dict =torch.ones([0] )
def __magic_name__ ( self : Optional[int] , __lowercase : Optional[Any] ) -> Any:
self.pixel_values.to(__lowercase )
return self
return Out()
return extract
def __magic_name__ ( self : Tuple ) -> Optional[int]:
SCREAMING_SNAKE_CASE__ : int ='''cpu''' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ : str =self.dummy_cond_unet
SCREAMING_SNAKE_CASE__ : Any =PNDMScheduler(skip_prk_steps=__lowercase )
SCREAMING_SNAKE_CASE__ : int =self.dummy_vae
SCREAMING_SNAKE_CASE__ : List[Any] =self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : Optional[Any] =XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
SCREAMING_SNAKE_CASE__ : str =77
SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_image.to(__lowercase )
SCREAMING_SNAKE_CASE__ : List[str] =init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ : List[Any] =AltDiffusionImgaImgPipeline(
unet=__lowercase , scheduler=__lowercase , vae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , safety_checker=__lowercase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE__ : str =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =alt_pipe.to(__lowercase )
alt_pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Tuple ='''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.Generator(device=__lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Any =alt_pipe(
[prompt] , generator=__lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=__lowercase , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =output.images
SCREAMING_SNAKE_CASE__ : Optional[int] =torch.Generator(device=__lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple =alt_pipe(
[prompt] , generator=__lowercase , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=__lowercase , return_dict=__lowercase , )[0]
SCREAMING_SNAKE_CASE__ : int =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Dict =image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ : Optional[Any] =np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5e-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __magic_name__ ( self : Dict ) -> int:
SCREAMING_SNAKE_CASE__ : Tuple =self.dummy_cond_unet
SCREAMING_SNAKE_CASE__ : Tuple =PNDMScheduler(skip_prk_steps=__lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] =self.dummy_vae
SCREAMING_SNAKE_CASE__ : Optional[int] =self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : Tuple =XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =77
SCREAMING_SNAKE_CASE__ : Dict =self.dummy_image.to(__lowercase )
# put models in fp16
SCREAMING_SNAKE_CASE__ : List[Any] =unet.half()
SCREAMING_SNAKE_CASE__ : int =vae.half()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =bert.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ : Optional[int] =AltDiffusionImgaImgPipeline(
unet=__lowercase , scheduler=__lowercase , vae=__lowercase , text_encoder=__lowercase , tokenizer=__lowercase , safety_checker=__lowercase , feature_extractor=self.dummy_extractor , )
SCREAMING_SNAKE_CASE__ : Tuple =VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =alt_pipe.to(__lowercase )
alt_pipe.set_progress_bar_config(disable=__lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ : Dict =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[Any] =alt_pipe(
[prompt] , generator=__lowercase , num_inference_steps=2 , output_type='''np''' , image=__lowercase , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def __magic_name__ ( self : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : Optional[int] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE__ : int =init_image.resize((7_60, 5_04) )
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''BAAI/AltDiffusion'''
SCREAMING_SNAKE_CASE__ : Optional[int] =AltDiffusionImgaImgPipeline.from_pretrained(
__lowercase , safety_checker=__lowercase , )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Optional[Any] ='''A fantasy landscape, trending on artstation'''
SCREAMING_SNAKE_CASE__ : int =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str =pipe(
prompt=__lowercase , image=__lowercase , strength=0.75 , guidance_scale=7.5 , generator=__lowercase , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =output.images[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] =image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 7_60, 3)
SCREAMING_SNAKE_CASE__ : Union[str, Any] =np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class __SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __magic_name__ ( self : Tuple ) -> Optional[int]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __magic_name__ ( self : str ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
SCREAMING_SNAKE_CASE__ : Any =init_image.resize((7_68, 5_12) )
SCREAMING_SNAKE_CASE__ : Optional[Any] =load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
SCREAMING_SNAKE_CASE__ : Any ='''BAAI/AltDiffusion'''
SCREAMING_SNAKE_CASE__ : List[Any] =AltDiffusionImgaImgPipeline.from_pretrained(
__lowercase , safety_checker=__lowercase , )
pipe.to(__lowercase )
pipe.set_progress_bar_config(disable=__lowercase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ : Any ='''A fantasy landscape, trending on artstation'''
SCREAMING_SNAKE_CASE__ : List[str] =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Optional[int] =pipe(
prompt=__lowercase , image=__lowercase , strength=0.75 , guidance_scale=7.5 , generator=__lowercase , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : Dict =output.images[0]
assert image.shape == (5_12, 7_68, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1e-2 | 296 |
'''simple docstring'''
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __SCREAMING_SNAKE_CASE ( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
snake_case_ = [("""size""", ctypes.c_int), ("""visible""", ctypes.c_byte)]
def _a( ):
'''simple docstring'''
if os.name == "nt":
SCREAMING_SNAKE_CASE__ : Optional[int] =CursorInfo()
SCREAMING_SNAKE_CASE__ : Optional[int] =ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCamelCase__, ctypes.byref(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =False
ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCamelCase__, ctypes.byref(UpperCamelCase__ ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25l''' )
sys.stdout.flush()
def _a( ):
'''simple docstring'''
if os.name == "nt":
SCREAMING_SNAKE_CASE__ : List[str] =CursorInfo()
SCREAMING_SNAKE_CASE__ : Optional[Any] =ctypes.windll.kernelaa.GetStdHandle(-1_1 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(UpperCamelCase__, ctypes.byref(UpperCamelCase__ ) )
SCREAMING_SNAKE_CASE__ : List[str] =True
ctypes.windll.kernelaa.SetConsoleCursorInfo(UpperCamelCase__, ctypes.byref(UpperCamelCase__ ) )
elif os.name == "posix":
sys.stdout.write('''\033[?25h''' )
sys.stdout.flush()
@contextmanager
def _a( ):
'''simple docstring'''
try:
hide_cursor()
yield
finally:
show_cursor() | 296 | 1 |
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( snake_case ) -> bool:
if len(snake_case ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
_UpperCAmelCase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod() | 175 |
def _SCREAMING_SNAKE_CASE ( snake_case , snake_case ) -> bool:
_UpperCAmelCase = len(snake_case ) + 1
_UpperCAmelCase = len(snake_case ) + 1
# dp is a 2d matrix where dp[i][j] denotes whether prefix string of
# length i of input_string matches with prefix string of length j of
# given pattern.
# "dp" stands for dynamic programming.
_UpperCAmelCase = [[0 for i in range(snake_case )] for j in range(snake_case )]
# since string of zero length match pattern of zero length
_UpperCAmelCase = 1
# since pattern of zero length will never match with string of non-zero length
for i in range(1 , snake_case ):
_UpperCAmelCase = 0
# since string of zero length will match with pattern where there
# is at least one * alternatively
for j in range(1 , snake_case ):
_UpperCAmelCase = dp[0][j - 2] if pattern[j - 1] == """*""" else 0
# now using bottom-up approach to find for all remaining lengths
for i in range(1 , snake_case ):
for j in range(1 , snake_case ):
if input_string[i - 1] == pattern[j - 1] or pattern[j - 1] == ".":
_UpperCAmelCase = dp[i - 1][j - 1]
elif pattern[j - 1] == "*":
if dp[i][j - 2] == 1:
_UpperCAmelCase = 1
elif pattern[j - 2] in (input_string[i - 1], "."):
_UpperCAmelCase = dp[i - 1][j]
else:
_UpperCAmelCase = 0
else:
_UpperCAmelCase = 0
return bool(dp[-1][-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
# inputing the strings
# input_string = input("input a string :")
# pattern = input("input a pattern :")
a = "aab"
a = "c*a*b"
# using function to check whether given string matches the given pattern
if match_pattern(input_string, pattern):
print(F'{input_string} matches the given pattern {pattern}')
else:
print(F'{input_string} does not match with the given pattern {pattern}') | 175 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_A : List[Any] = {"""configuration_encoder_decoder""": ["""EncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : List[Any] = ["""EncoderDecoderModel"""]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : str = ["""TFEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_A : Tuple = ["""FlaxEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
_A : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 361 |
"""simple docstring"""
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
_A : str = False
try:
_A : Dict = _is_package_available("""google.colab""")
except ModuleNotFoundError:
pass
@input.register
class a__ :
def __init__( self , _a = None , _a = [] ):
lowercase : Union[str, Any] = 0
lowercase : str = choices
lowercase : List[Any] = prompt
if sys.platform == "win32":
lowercase : Union[str, Any] = "*"
else:
lowercase : int = "➔ "
def __magic_name__ ( self , _a , _a = "" ):
if sys.platform != "win32":
writeColor(self.choices[index] , 32 , _a )
else:
forceWrite(self.choices[index] , _a )
def __magic_name__ ( self , _a ):
if index == self.position:
forceWrite(f""" {self.arrow_char} """ )
self.write_choice(_a )
else:
forceWrite(f""" {self.choices[index]}""" )
reset_cursor()
def __magic_name__ ( self , _a , _a = 1 ):
lowercase : Optional[int] = self.position
if direction == Direction.DOWN:
if self.position + 1 >= len(self.choices ):
return
self.position += num_spaces
else:
if self.position - 1 < 0:
return
self.position -= num_spaces
clear_line()
self.print_choice(_a )
move_cursor(_a , direction.name )
self.print_choice(self.position )
@input.mark(KEYMAP["up"] )
def __magic_name__ ( self ):
self.move_direction(Direction.UP )
@input.mark(KEYMAP["down"] )
def __magic_name__ ( self ):
self.move_direction(Direction.DOWN )
@input.mark(KEYMAP["newline"] )
def __magic_name__ ( self ):
move_cursor(len(self.choices ) - self.position , "DOWN" )
return self.position
@input.mark(KEYMAP["interrupt"] )
def __magic_name__ ( self ):
move_cursor(len(self.choices ) - self.position , "DOWN" )
raise KeyboardInterrupt
@input.mark_multiple(*[KEYMAP[str(_a )] for number in range(10 )] )
def __magic_name__ ( self ):
lowercase : List[str] = int(chr(self.current_selection ) )
lowercase : Union[str, Any] = index - self.position
if index == self.position:
return
if index < len(self.choices ):
if self.position > index:
self.move_direction(Direction.UP , -movement )
elif self.position < index:
self.move_direction(Direction.DOWN , _a )
else:
return
else:
return
def __magic_name__ ( self , _a = 0 ):
if self.prompt:
linebreak()
forceWrite(self.prompt , "\n" )
if in_colab:
forceWrite("Please input a choice index (starting from 0), and press enter" , "\n" )
else:
forceWrite("Please select a choice using the arrow or number keys, and selecting with enter" , "\n" )
lowercase : Tuple = default_choice
for i in range(len(self.choices ) ):
self.print_choice(_a )
forceWrite("\n" )
move_cursor(len(self.choices ) - self.position , "UP" )
with cursor.hide():
while True:
if in_colab:
try:
lowercase : Tuple = int(builtins.input() )
except ValueError:
lowercase : Optional[int] = default_choice
else:
lowercase : int = self.handle_input()
if choice is not None:
reset_cursor()
for _ in range(len(self.choices ) + 1 ):
move_cursor(1 , "UP" )
clear_line()
self.write_choice(_a , "\n" )
return choice
| 361 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 713 |
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
SCREAMING_SNAKE_CASE : Tuple = """src/transformers"""
SCREAMING_SNAKE_CASE : Optional[int] = """docs/source/en/tasks"""
def lowercase ( _snake_case : int , _snake_case : Optional[Any] , _snake_case : List[str] ) ->Union[str, Any]:
"""simple docstring"""
with open(_snake_case , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
__snake_case : List[str] = f.readlines()
# Find the start prompt.
__snake_case : int = 0
while not lines[start_index].startswith(_snake_case ):
start_index += 1
start_index += 1
__snake_case : str = start_index
while not lines[end_index].startswith(_snake_case ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE : Optional[int] = direct_transformers_import(TRANSFORMERS_PATH)
SCREAMING_SNAKE_CASE : str = {
"""asr.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"""audio_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"""language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"""image_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"""masked_language_modeling.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"""multiple_choice.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"""object_detection.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"""question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"""semantic_segmentation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"""sequence_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"""summarization.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""token_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"""translation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"""video_classification.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"""document_question_answering.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"""monocular_depth_estimation.md""": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SCREAMING_SNAKE_CASE : List[str] = {
"""summarization.md""": ("""nllb""",),
"""translation.md""": ("""nllb""",),
}
def lowercase ( _snake_case : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
__snake_case : str = TASK_GUIDE_TO_MODELS[task_guide]
__snake_case : str = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(_snake_case , set() )
__snake_case : Union[str, Any] = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f"""[{name}](../model_doc/{code})""" for code, name in model_names.items()] ) + "\n"
def lowercase ( _snake_case : Union[str, Any] , _snake_case : str=False ) ->Optional[int]:
"""simple docstring"""
__snake_case , __snake_case , __snake_case , __snake_case : Tuple = _find_text_in_file(
filename=os.path.join(_snake_case , _snake_case ) , start_prompt='''<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->''' , end_prompt='''<!--End of the generated tip-->''' , )
__snake_case : str = get_model_list_for_task(_snake_case )
if current_list != new_list:
if overwrite:
with open(os.path.join(_snake_case , _snake_case ) , '''w''' , encoding='''utf-8''' , newline='''\n''' ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f"""The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"""
''' to fix this.''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
SCREAMING_SNAKE_CASE : List[str] = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 229 | 0 |
import webbrowser
from sys import argv
from urllib.parse import parse_qs, quote
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = "%20".join(argv[1:]) if len(argv) > 1 else quote(str(input("Search: ")))
print("Googling.....")
SCREAMING_SNAKE_CASE__ = F'''https://www.google.com/search?q={query}&num=100'''
SCREAMING_SNAKE_CASE__ = requests.get(
url,
headers={"User-Agent": str(UserAgent().random)},
)
try:
SCREAMING_SNAKE_CASE__ = (
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "yuRUbf"})
.find("a")
.get("href")
)
except AttributeError:
SCREAMING_SNAKE_CASE__ = parse_qs(
BeautifulSoup(res.text, "html.parser")
.find("div", attrs={"class": "kCrYT"})
.find("a")
.get("href")
)["url"][0]
webbrowser.open(link)
| 631 |
from argparse import ArgumentParser
from ..pipelines import Pipeline, PipelineDataFormat, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__) # pylint: disable=invalid-name
def lowercase ( a ):
'''simple docstring'''
if not path:
return "pipe"
for ext in PipelineDataFormat.SUPPORTED_FORMATS:
if path.endswith(a ):
return ext
raise Exception(
F"Unable to determine file format from file extension {path}. "
F"Please provide the format through --format {PipelineDataFormat.SUPPORTED_FORMATS}" )
def lowercase ( a ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ :Tuple = pipeline(
task=args.task , model=args.model if args.model else None , config=args.config , tokenizer=args.tokenizer , device=args.device , )
SCREAMING_SNAKE_CASE_ :Any = try_infer_format_from_ext(args.input ) if args.format == "infer" else args.format
SCREAMING_SNAKE_CASE_ :str = PipelineDataFormat.from_str(
format=a , output_path=args.output , input_path=args.input , column=args.column if args.column else nlp.default_input_names , overwrite=args.overwrite , )
return RunCommand(a , a )
class _UpperCAmelCase ( lowercase ):
def __init__( self : List[Any] , UpperCAmelCase : Pipeline , UpperCAmelCase : PipelineDataFormat):
SCREAMING_SNAKE_CASE_ :int = nlp
SCREAMING_SNAKE_CASE_ :Any = reader
@staticmethod
def _snake_case ( UpperCAmelCase : ArgumentParser):
SCREAMING_SNAKE_CASE_ :Any = parser.add_parser("run" , help="Run a pipeline through the CLI")
run_parser.add_argument("--task" , choices=get_supported_tasks() , help="Task to run")
run_parser.add_argument("--input" , type=UpperCAmelCase , help="Path to the file to use for inference")
run_parser.add_argument("--output" , type=UpperCAmelCase , help="Path to the file that will be used post to write results.")
run_parser.add_argument("--model" , type=UpperCAmelCase , help="Name or path to the model to instantiate.")
run_parser.add_argument("--config" , type=UpperCAmelCase , help="Name or path to the model's config to instantiate.")
run_parser.add_argument(
"--tokenizer" , type=UpperCAmelCase , help="Name of the tokenizer to use. (default: same as the model name)")
run_parser.add_argument(
"--column" , type=UpperCAmelCase , help="Name of the column to use as input. (For multi columns input as QA use column1,columns2)" , )
run_parser.add_argument(
"--format" , type=UpperCAmelCase , default="infer" , choices=PipelineDataFormat.SUPPORTED_FORMATS , help="Input format to read from" , )
run_parser.add_argument(
"--device" , type=UpperCAmelCase , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
run_parser.add_argument("--overwrite" , action="store_true" , help="Allow overwriting the output file.")
run_parser.set_defaults(func=UpperCAmelCase)
def _snake_case ( self : List[str]):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ :Union[str, Any] = self._nlp, []
for entry in self._reader:
SCREAMING_SNAKE_CASE_ :int = nlp(**UpperCAmelCase) if self._reader.is_multi_columns else nlp(UpperCAmelCase)
if isinstance(UpperCAmelCase , UpperCAmelCase):
outputs.append(UpperCAmelCase)
else:
outputs += output
# Saving data
if self._nlp.binary_output:
SCREAMING_SNAKE_CASE_ :Dict = self._reader.save_binary(UpperCAmelCase)
logger.warning(F"Current pipeline requires output to be in binary format, saving at {binary_path}")
else:
self._reader.save(UpperCAmelCase)
| 631 | 1 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def a (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
# Initialise PyTorch model
SCREAMING_SNAKE_CASE_ = RemBertConfig.from_json_file(_lowerCAmelCase )
print('''Building PyTorch model from configuration: {}'''.format(str(_lowerCAmelCase ) ) )
SCREAMING_SNAKE_CASE_ = RemBertModel(_lowerCAmelCase )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(_lowerCAmelCase ) )
torch.save(model.state_dict() , _lowerCAmelCase )
if __name__ == "__main__":
__SCREAMING_SNAKE_CASE =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--rembert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained RemBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__SCREAMING_SNAKE_CASE =parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 700 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class __magic_name__ :
'''simple docstring'''
def __init__( self: Optional[int] , _lowerCamelCase: Tuple , _lowerCamelCase: Any=None , _lowerCamelCase: Any=None , _lowerCamelCase: Tuple=None , _lowerCamelCase: Optional[Any]="resnet50" , _lowerCamelCase: Any=3 , _lowerCamelCase: Optional[int]=32 , _lowerCamelCase: List[Any]=3 , _lowerCamelCase: int=True , _lowerCamelCase: Any=True , ):
SCREAMING_SNAKE_CASE_ = parent
SCREAMING_SNAKE_CASE_ = out_indices if out_indices is not None else [4]
SCREAMING_SNAKE_CASE_ = stage_names
SCREAMING_SNAKE_CASE_ = out_features
SCREAMING_SNAKE_CASE_ = backbone
SCREAMING_SNAKE_CASE_ = batch_size
SCREAMING_SNAKE_CASE_ = image_size
SCREAMING_SNAKE_CASE_ = num_channels
SCREAMING_SNAKE_CASE_ = use_pretrained_backbone
SCREAMING_SNAKE_CASE_ = is_training
def _A ( self: Optional[Any] ):
SCREAMING_SNAKE_CASE_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ = self.get_config()
return config, pixel_values
def _A ( self: List[Any] ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
def _A ( self: str , _lowerCamelCase: str , _lowerCamelCase: Optional[Any] ):
SCREAMING_SNAKE_CASE_ = TimmBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ = model(_lowerCamelCase )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 14, 14) , )
def _A ( self: Optional[Any] ):
SCREAMING_SNAKE_CASE_ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = config_and_inputs
SCREAMING_SNAKE_CASE_ = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , unittest.TestCase):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = (TimmBackbone,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE__ : List[str] = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
SCREAMING_SNAKE_CASE__ : Tuple = False
SCREAMING_SNAKE_CASE__ : Optional[int] = False
SCREAMING_SNAKE_CASE__ : Any = False
SCREAMING_SNAKE_CASE__ : Any = False
def _A ( self: int ):
SCREAMING_SNAKE_CASE_ = TimmBackboneModelTester(self )
SCREAMING_SNAKE_CASE_ = ConfigTester(self , config_class=_lowerCamelCase , has_text_modality=_lowerCamelCase )
def _A ( self: str ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _A ( self: List[Any] ):
SCREAMING_SNAKE_CASE_ = '''resnet18'''
SCREAMING_SNAKE_CASE_ = '''microsoft/resnet-18'''
SCREAMING_SNAKE_CASE_ = AutoBackbone.from_pretrained(_lowerCamelCase , use_timm_backbone=_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = AutoBackbone.from_pretrained(_lowerCamelCase )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
SCREAMING_SNAKE_CASE_ = AutoBackbone.from_pretrained(_lowerCamelCase , use_timm_backbone=_lowerCamelCase , out_indices=[1, 2, 3] )
SCREAMING_SNAKE_CASE_ = AutoBackbone.from_pretrained(_lowerCamelCase , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip('''TimmBackbone doesn\'t support feed forward chunking''' )
def _A ( self: Optional[Any] ):
pass
@unittest.skip('''TimmBackbone doesn\'t have num_hidden_layers attribute''' )
def _A ( self: Any ):
pass
@unittest.skip('''TimmBackbone initialization is managed on the timm side''' )
def _A ( self: Dict ):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _A ( self: Optional[Any] ):
pass
@unittest.skip('''TimmBackbone models doesn\'t have inputs_embeds''' )
def _A ( self: List[Any] ):
pass
@unittest.skip('''TimmBackbone model cannot be created without specifying a backbone checkpoint''' )
def _A ( self: Tuple ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _A ( self: List[str] ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _A ( self: Any ):
pass
@unittest.skip('''model weights aren\'t tied in TimmBackbone.''' )
def _A ( self: Tuple ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _A ( self: Optional[int] ):
pass
@unittest.skip('''Only checkpoints on timm can be loaded into TimmBackbone''' )
def _A ( self: Union[str, Any] ):
pass
@unittest.skip('''TimmBackbone doesn\'t have hidden size info in its configuration.''' )
def _A ( self: Any ):
pass
@unittest.skip('''TimmBackbone doesn\'t support output_attentions.''' )
def _A ( self: Tuple ):
pass
@unittest.skip('''Safetensors is not supported by timm.''' )
def _A ( self: Optional[int] ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def _A ( self: str ):
pass
def _A ( self: Dict ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowerCamelCase )
def _A ( self: Dict ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = self.has_attentions
# no need to test all models as different heads yield the same functionality
SCREAMING_SNAKE_CASE_ = self.all_model_classes[0]
SCREAMING_SNAKE_CASE_ = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = self._prepare_for_class(_lowerCamelCase , _lowerCamelCase )
SCREAMING_SNAKE_CASE_ = model(**_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = outputs[0][-1]
# Encoder-/Decoder-only models
SCREAMING_SNAKE_CASE_ = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
SCREAMING_SNAKE_CASE_ = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_lowerCamelCase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def _A ( self: List[str] ):
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(**_lowerCamelCase )
self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
self.assertEqual(len(model.channels ) , len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
SCREAMING_SNAKE_CASE_ = copy.deepcopy(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = None
SCREAMING_SNAKE_CASE_ = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(**_lowerCamelCase )
self.assertEqual(len(result.feature_maps ) , 1 )
self.assertEqual(len(model.channels ) , 1 )
# Check backbone can be initialized with fresh weights
SCREAMING_SNAKE_CASE_ = copy.deepcopy(_lowerCamelCase )
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
SCREAMING_SNAKE_CASE_ = model(**_lowerCamelCase )
| 89 | 0 |
import numpy as np
def __snake_case ( _UpperCamelCase ) -> np.array:
return 1 / (1 + np.exp(-vector ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 487 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def __snake_case ( _UpperCamelCase , _UpperCamelCase ) -> Union[str, Any]:
_a = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'''
_a = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ).convert('''RGB''' )
_a = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
_a = transform(_UpperCamelCase ).unsqueeze(0 ).to(_UpperCamelCase )
return image
def __snake_case ( _UpperCamelCase ) -> List[str]:
if "visual_encoder" in key:
_a = re.sub('''visual_encoder*''' , '''vision_model.encoder''' , _UpperCamelCase )
if "blocks" in key:
_a = re.sub(R'''blocks''' , '''layers''' , _UpperCamelCase )
if "attn" in key:
_a = re.sub(R'''attn''' , '''self_attn''' , _UpperCamelCase )
if "norm1" in key:
_a = re.sub(R'''norm1''' , '''layer_norm1''' , _UpperCamelCase )
if "norm2" in key:
_a = re.sub(R'''norm2''' , '''layer_norm2''' , _UpperCamelCase )
if "encoder.norm" in key:
_a = re.sub(R'''encoder.norm''' , '''post_layernorm''' , _UpperCamelCase )
if "encoder.patch_embed.proj" in key:
_a = re.sub(R'''encoder.patch_embed.proj''' , '''embeddings.patch_embedding''' , _UpperCamelCase )
if "encoder.pos_embed" in key:
_a = re.sub(R'''encoder.pos_embed''' , '''embeddings.position_embedding''' , _UpperCamelCase )
if "encoder.cls_token" in key:
_a = re.sub(R'''encoder.cls_token''' , '''embeddings.class_embedding''' , _UpperCamelCase )
if "self_attn" in key:
_a = re.sub(R'''self_attn.proj''' , '''self_attn.projection''' , _UpperCamelCase )
return key
@torch.no_grad()
def __snake_case ( _UpperCamelCase , _UpperCamelCase=None ) -> List[str]:
if config_path is not None:
_a = BlipConfig.from_pretrained(_UpperCamelCase )
else:
_a = BlipConfig(projection_dim=5_12 , text_config={} , vision_config={} )
_a = BlipForConditionalGeneration(_UpperCamelCase ).eval()
_a = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth'''
_a = blip_decoder(pretrained=_UpperCamelCase , image_size=3_84 , vit='''base''' )
_a = pt_model.eval()
_a = pt_model.state_dict()
for key in modified_state_dict.copy():
_a = modified_state_dict.pop(_UpperCamelCase )
_a = rename_key(_UpperCamelCase )
_a = value
hf_model.load_state_dict(_UpperCamelCase )
_a = 3_84
_a = load_demo_image(image_size=_UpperCamelCase , device='''cpu''' )
_a = BertTokenizer.from_pretrained('''bert-base-uncased''' )
_a = tokenizer(['''a picture of'''] ).input_ids
_a = hf_model.generate(_UpperCamelCase , _UpperCamelCase )
assert out[0].tolist() == [3_05_22, 10_37, 38_61, 19_97, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
_a = hf_model.generate(_UpperCamelCase )
assert out[0].tolist() == [3_05_22, 10_37, 24_50, 35_64, 20_06, 19_96, 35_09, 20_07, 20_14, 38_99, 1_02]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(_UpperCamelCase )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
_a = (
'''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth'''
)
_a = blip_vqa(pretrained=_UpperCamelCase , image_size=_UpperCamelCase , vit='''base''' )
vqa_model.eval()
_a = vqa_model.state_dict()
for key in modified_state_dict.copy():
_a = modified_state_dict.pop(_UpperCamelCase )
_a = rename_key(_UpperCamelCase )
_a = value
_a = BlipForQuestionAnswering(_UpperCamelCase )
hf_vqa_model.load_state_dict(_UpperCamelCase )
_a = ['''How many dogs are in this image?''']
_a = tokenizer(_UpperCamelCase , return_tensors='''pt''' ).input_ids
_a = hf_vqa_model.generate(_UpperCamelCase , _UpperCamelCase )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + '''_vqa''' )
_a = '''https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth'''
_a = blip_itm(pretrained=_UpperCamelCase , image_size=_UpperCamelCase , vit='''base''' )
itm_model.eval()
_a = itm_model.state_dict()
for key in modified_state_dict.copy():
_a = modified_state_dict.pop(_UpperCamelCase )
_a = rename_key(_UpperCamelCase )
_a = value
_a = BlipForImageTextRetrieval(_UpperCamelCase )
_a = ['''A picture of a woman with a dog sitting in a beach''']
_a = tokenizer(
_UpperCamelCase , return_tensors='''pt''' , padding='''max_length''' , truncation=_UpperCamelCase , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(_UpperCamelCase )
hf_itm_model.eval()
_a = hf_itm_model(_UpperCamelCase , _UpperCamelCase , use_itm_head=_UpperCamelCase )
_a = hf_itm_model(_UpperCamelCase , _UpperCamelCase , use_itm_head=_UpperCamelCase )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + '''_itm''' )
if __name__ == "__main__":
lowerCamelCase :int = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
lowerCamelCase :Tuple = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 487 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_lowerCAmelCase : Optional[Any] = {
"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Dict = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : Any = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase : int = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
_lowerCAmelCase : str = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 604 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __snake_case ( SCREAMING_SNAKE_CASE ):
def __init__( self ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = params
lowerCAmelCase__ = np.array(a_ )
lowerCAmelCase__ = np.array([len(a_ ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self ,a_ ):
"""simple docstring"""
return (self.token_ids[index], self.lengths[index])
def __len__( self ):
"""simple docstring"""
return len(self.lengths )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.params.max_model_input_size
lowerCAmelCase__ = self.lengths > max_len
logger.info(f'Splitting {sum(a_ )} too long sequences.' )
def divide_chunks(a_ ,a_ ):
return [l[i : i + n] for i in range(0 ,len(a_ ) ,a_ )]
lowerCAmelCase__ = []
lowerCAmelCase__ = []
if self.params.mlm:
lowerCAmelCase__ , lowerCAmelCase__ = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
lowerCAmelCase__ , lowerCAmelCase__ = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids ,self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
lowerCAmelCase__ = []
for sub_s in divide_chunks(seq_ ,max_len - 2 ):
if sub_s[0] != cls_id:
lowerCAmelCase__ = np.insert(a_ ,0 ,a_ )
if sub_s[-1] != sep_id:
lowerCAmelCase__ = np.insert(a_ ,len(a_ ) ,a_ )
assert len(a_ ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(a_ )
new_tok_ids.extend(a_ )
new_lengths.extend([len(a_ ) for l in sub_seqs] )
lowerCAmelCase__ = np.array(a_ )
lowerCAmelCase__ = np.array(a_ )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = len(self )
lowerCAmelCase__ = self.lengths > 11
lowerCAmelCase__ = self.token_ids[indices]
lowerCAmelCase__ = self.lengths[indices]
lowerCAmelCase__ = len(self )
logger.info(f'Remove {init_size - new_size} too short (<=11 tokens) sequences.' )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
if "unk_token" not in self.params.special_tok_ids:
return
else:
lowerCAmelCase__ = self.params.special_tok_ids['unk_token']
lowerCAmelCase__ = len(self )
lowerCAmelCase__ = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
lowerCAmelCase__ = (unk_occs / self.lengths) < 0.5
lowerCAmelCase__ = self.token_ids[indices]
lowerCAmelCase__ = self.lengths[indices]
lowerCAmelCase__ = len(self )
logger.info(f'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
if not self.params.is_master:
return
logger.info(f'{len(self )} sequences' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def SCREAMING_SNAKE_CASE_ ( self ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = [t[0] for t in batch]
lowerCAmelCase__ = [t[1] for t in batch]
assert len(a_ ) == len(a_ )
# Max for paddings
lowerCAmelCase__ = max(a_ )
# Pad token ids
if self.params.mlm:
lowerCAmelCase__ = self.params.special_tok_ids['pad_token']
else:
lowerCAmelCase__ = self.params.special_tok_ids['unk_token']
lowerCAmelCase__ = [list(t.astype(a_ ) ) + [pad_idx] * (max_seq_len_ - len(a_ )) for t in token_ids]
assert len(tk_ ) == len(a_ )
assert all(len(a_ ) == max_seq_len_ for t in tk_ )
lowerCAmelCase__ = torch.tensor(tk_ ) # (bs, max_seq_len_)
lowerCAmelCase__ = torch.tensor(a_ ) # (bs)
return tk_t, lg_t
| 604 | 1 |
"""simple docstring"""
import argparse
import json
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
VideoMAEConfig,
VideoMAEForPreTraining,
VideoMAEForVideoClassification,
VideoMAEImageProcessor,
)
def a ( __UpperCAmelCase : int ) -> Optional[Any]:
__magic_name__: Dict = VideoMAEConfig()
set_architecture_configs(__UpperCAmelCase , __UpperCAmelCase )
if "finetuned" not in model_name:
__magic_name__: Dict = False
if "finetuned" in model_name:
__magic_name__: Tuple = """huggingface/label-files"""
if "kinetics" in model_name:
__magic_name__: Any = 4_0_0
__magic_name__: Any = """kinetics400-id2label.json"""
elif "ssv2" in model_name:
__magic_name__: List[str] = 1_7_4
__magic_name__: Any = """something-something-v2-id2label.json"""
else:
raise ValueError("""Model name should either contain 'kinetics' or 'ssv2' in case it's fine-tuned.""" )
__magic_name__: int = json.load(open(hf_hub_download(__UpperCAmelCase , __UpperCAmelCase , repo_type="""dataset""" ) , """r""" ) )
__magic_name__: Union[str, Any] = {int(__UpperCAmelCase ): v for k, v in idalabel.items()}
__magic_name__: int = idalabel
__magic_name__: str = {v: k for k, v in idalabel.items()}
return config
def a ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Optional[Any] ) -> Optional[int]:
if "small" in model_name:
__magic_name__: List[Any] = 3_8_4
__magic_name__: Optional[int] = 1_5_3_6
__magic_name__: Union[str, Any] = 1_2
__magic_name__: List[Any] = 1_6
__magic_name__: Tuple = 1_2
__magic_name__: Optional[Any] = 3
__magic_name__: Dict = 1_9_2
__magic_name__: Tuple = 7_6_8
elif "large" in model_name:
__magic_name__: int = 1_0_2_4
__magic_name__: Optional[int] = 4_0_9_6
__magic_name__: Dict = 2_4
__magic_name__: Any = 1_6
__magic_name__: Optional[int] = 1_2
__magic_name__: str = 8
__magic_name__: Optional[Any] = 5_1_2
__magic_name__: Tuple = 2_0_4_8
elif "huge" in model_name:
__magic_name__: str = 1_2_8_0
__magic_name__: Optional[int] = 5_1_2_0
__magic_name__: Any = 3_2
__magic_name__: int = 1_6
__magic_name__: Any = 1_2
__magic_name__: int = 8
__magic_name__: Any = 6_4_0
__magic_name__: Tuple = 2_5_6_0
elif "base" not in model_name:
raise ValueError("""Model name should include either \"small\", \"base\", \"large\", or \"huge\"""" )
def a ( __UpperCAmelCase : str ) -> Optional[int]:
if "encoder." in name:
__magic_name__: List[Any] = name.replace("""encoder.""" , """""" )
if "cls_token" in name:
__magic_name__: Any = name.replace("""cls_token""" , """videomae.embeddings.cls_token""" )
if "decoder_pos_embed" in name:
__magic_name__: Tuple = name.replace("""decoder_pos_embed""" , """decoder.decoder_pos_embed""" )
if "pos_embed" in name and "decoder" not in name:
__magic_name__: Any = name.replace("""pos_embed""" , """videomae.embeddings.position_embeddings""" )
if "patch_embed.proj" in name:
__magic_name__: Tuple = name.replace("""patch_embed.proj""" , """videomae.embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__magic_name__: Optional[int] = name.replace("""patch_embed.norm""" , """videomae.embeddings.norm""" )
if "decoder.blocks" in name:
__magic_name__: int = name.replace("""decoder.blocks""" , """decoder.decoder_layers""" )
if "blocks" in name:
__magic_name__: Any = name.replace("""blocks""" , """videomae.encoder.layer""" )
if "attn.proj" in name:
__magic_name__: Optional[int] = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name and "bias" not in name:
__magic_name__: str = name.replace("""attn""" , """attention.self""" )
if "attn" in name:
__magic_name__: Dict = name.replace("""attn""" , """attention.attention""" )
if "norm1" in name:
__magic_name__: Optional[Any] = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__magic_name__: str = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__magic_name__: List[str] = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__magic_name__: Union[str, Any] = name.replace("""mlp.fc2""" , """output.dense""" )
if "decoder_embed" in name:
__magic_name__: List[Any] = name.replace("""decoder_embed""" , """decoder.decoder_embed""" )
if "decoder_norm" in name:
__magic_name__: Tuple = name.replace("""decoder_norm""" , """decoder.decoder_norm""" )
if "decoder_pred" in name:
__magic_name__: Tuple = name.replace("""decoder_pred""" , """decoder.decoder_pred""" )
if "norm.weight" in name and "decoder" not in name and "fc" not in name:
__magic_name__: List[Any] = name.replace("""norm.weight""" , """videomae.layernorm.weight""" )
if "norm.bias" in name and "decoder" not in name and "fc" not in name:
__magic_name__: List[str] = name.replace("""norm.bias""" , """videomae.layernorm.bias""" )
if "head" in name and "decoder" not in name:
__magic_name__: Dict = name.replace("""head""" , """classifier""" )
return name
def a ( __UpperCAmelCase : Optional[int] , __UpperCAmelCase : Dict ) -> List[Any]:
for key in orig_state_dict.copy().keys():
__magic_name__: List[Any] = orig_state_dict.pop(__UpperCAmelCase )
if key.startswith("""encoder.""" ):
__magic_name__: Optional[Any] = key.replace("""encoder.""" , """""" )
if "qkv" in key:
__magic_name__: List[Any] = key.split(""".""" )
if key.startswith("""decoder.blocks""" ):
__magic_name__: Tuple = config.decoder_hidden_size
__magic_name__: Optional[Any] = int(key_split[2] )
__magic_name__: Tuple = """decoder.decoder_layers."""
if "weight" in key:
__magic_name__: List[str] = val[:dim, :]
__magic_name__: Optional[Any] = val[dim : dim * 2, :]
__magic_name__: Tuple = val[-dim:, :]
else:
__magic_name__: Tuple = config.hidden_size
__magic_name__: Tuple = int(key_split[1] )
__magic_name__: Tuple = """videomae.encoder.layer."""
if "weight" in key:
__magic_name__: Any = val[:dim, :]
__magic_name__: List[str] = val[dim : dim * 2, :]
__magic_name__: Tuple = val[-dim:, :]
else:
__magic_name__: Dict = val
return orig_state_dict
def a ( ) -> List[str]:
__magic_name__: int = hf_hub_download(
repo_id="""hf-internal-testing/spaghetti-video""" , filename="""eating_spaghetti.npy""" , repo_type="""dataset""" )
__magic_name__: Any = np.load(__UpperCAmelCase )
return list(__UpperCAmelCase )
def a ( __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : int , __UpperCAmelCase : List[str] , __UpperCAmelCase : Tuple ) -> Tuple:
__magic_name__: Tuple = get_videomae_config(__UpperCAmelCase )
if "finetuned" in model_name:
__magic_name__: int = VideoMAEForVideoClassification(__UpperCAmelCase )
else:
__magic_name__: List[Any] = VideoMAEForPreTraining(__UpperCAmelCase )
# download original checkpoint, hosted on Google Drive
__magic_name__: Dict = """pytorch_model.bin"""
gdown.cached_download(__UpperCAmelCase , __UpperCAmelCase , quiet=__UpperCAmelCase )
__magic_name__: str = torch.load(__UpperCAmelCase , map_location="""cpu""" )
if "model" in files:
__magic_name__: Union[str, Any] = files["""model"""]
else:
__magic_name__: List[Any] = files["""module"""]
__magic_name__: List[Any] = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase )
model.load_state_dict(__UpperCAmelCase )
model.eval()
# verify model on basic input
__magic_name__: Any = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
__magic_name__: Any = prepare_video()
__magic_name__: Optional[int] = image_processor(__UpperCAmelCase , return_tensors="""pt""" )
if "finetuned" not in model_name:
__magic_name__: Dict = hf_hub_download(repo_id="""hf-internal-testing/bool-masked-pos""" , filename="""bool_masked_pos.pt""" )
__magic_name__: List[Any] = torch.load(__UpperCAmelCase )
__magic_name__: int = model(**__UpperCAmelCase )
__magic_name__: int = outputs.logits
__magic_name__: Optional[int] = [
"""videomae-small-finetuned-kinetics""",
"""videomae-small-finetuned-ssv2""",
# Kinetics-400 checkpoints (short = pretrained only for 800 epochs instead of 1600)
"""videomae-base-short""",
"""videomae-base-short-finetuned-kinetics""",
"""videomae-base""",
"""videomae-base-finetuned-kinetics""",
"""videomae-large""",
"""videomae-large-finetuned-kinetics""",
"""videomae-huge-finetuned-kinetics""",
# Something-Something-v2 checkpoints (short = pretrained only for 800 epochs instead of 2400)
"""videomae-base-short-ssv2""",
"""videomae-base-short-finetuned-ssv2""",
"""videomae-base-ssv2""",
"""videomae-base-finetuned-ssv2""",
]
# NOTE: logits were tested with image_mean and image_std equal to [0.5, 0.5, 0.5] and [0.5, 0.5, 0.5]
if model_name == "videomae-small-finetuned-kinetics":
__magic_name__: Optional[int] = torch.Size([1, 4_0_0] )
__magic_name__: int = torch.tensor([-0.92_91, -0.40_61, -0.93_07] )
elif model_name == "videomae-small-finetuned-ssv2":
__magic_name__: str = torch.Size([1, 1_7_4] )
__magic_name__: str = torch.tensor([0.26_71, -0.46_89, -0.82_35] )
elif model_name == "videomae-base":
__magic_name__: Any = torch.Size([1, 1_4_0_8, 1_5_3_6] )
__magic_name__: Optional[Any] = torch.tensor([[0.77_39, 0.79_68, 0.70_89], [0.67_01, 0.74_87, 0.62_09], [0.42_87, 0.51_58, 0.47_73]] )
elif model_name == "videomae-base-short":
__magic_name__: List[str] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
__magic_name__: int = torch.tensor([[0.79_94, 0.96_12, 0.85_08], [0.74_01, 0.89_58, 0.83_02], [0.58_62, 0.74_68, 0.73_25]] )
# we verified the loss both for normalized and unnormalized targets for this one
__magic_name__: Optional[int] = torch.tensor([0.51_42] ) if config.norm_pix_loss else torch.tensor([0.64_69] )
elif model_name == "videomae-large":
__magic_name__: str = torch.Size([1, 1_4_0_8, 1_5_3_6] )
__magic_name__: int = torch.tensor([[0.71_49, 0.79_97, 0.69_66], [0.67_68, 0.78_69, 0.69_48], [0.51_39, 0.62_21, 0.56_05]] )
elif model_name == "videomae-large-finetuned-kinetics":
__magic_name__: str = torch.Size([1, 4_0_0] )
__magic_name__: Union[str, Any] = torch.tensor([0.07_71, 0.00_11, -0.36_25] )
elif model_name == "videomae-huge-finetuned-kinetics":
__magic_name__: Any = torch.Size([1, 4_0_0] )
__magic_name__: Tuple = torch.tensor([0.24_33, 0.16_32, -0.48_94] )
elif model_name == "videomae-base-short-finetuned-kinetics":
__magic_name__: List[Any] = torch.Size([1, 4_0_0] )
__magic_name__: int = torch.tensor([0.65_88, 0.09_90, -0.24_93] )
elif model_name == "videomae-base-finetuned-kinetics":
__magic_name__: List[str] = torch.Size([1, 4_0_0] )
__magic_name__: int = torch.tensor([0.36_69, -0.06_88, -0.24_21] )
elif model_name == "videomae-base-short-ssv2":
__magic_name__: str = torch.Size([1, 1_4_0_8, 1_5_3_6] )
__magic_name__: str = torch.tensor([[0.47_12, 0.52_96, 0.57_86], [0.22_78, 0.27_29, 0.40_26], [0.03_52, 0.07_30, 0.25_06]] )
elif model_name == "videomae-base-short-finetuned-ssv2":
__magic_name__: List[Any] = torch.Size([1, 1_7_4] )
__magic_name__: Optional[int] = torch.tensor([-0.05_37, -0.15_39, -0.32_66] )
elif model_name == "videomae-base-ssv2":
__magic_name__: List[str] = torch.Size([1, 1_4_0_8, 1_5_3_6] )
__magic_name__: Any = torch.tensor([[0.81_31, 0.87_27, 0.85_46], [0.73_66, 0.93_77, 0.88_70], [0.59_35, 0.88_74, 0.85_64]] )
elif model_name == "videomae-base-finetuned-ssv2":
__magic_name__: Optional[Any] = torch.Size([1, 1_7_4] )
__magic_name__: Any = torch.tensor([0.19_61, -0.83_37, -0.63_89] )
else:
raise ValueError(f'Model name not supported. Should be one of {model_names}' )
# verify logits
assert logits.shape == expected_shape
if "finetuned" in model_name:
assert torch.allclose(logits[0, :3] , __UpperCAmelCase , atol=1E-4 )
else:
print("""Logits:""" , logits[0, :3, :3] )
assert torch.allclose(logits[0, :3, :3] , __UpperCAmelCase , atol=1E-4 )
print("""Logits ok!""" )
# verify loss, if applicable
if model_name == "videomae-base-short":
__magic_name__: Tuple = outputs.loss
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1E-4 )
print("""Loss ok!""" )
if pytorch_dump_folder_path is not None:
print(f'Saving model and image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(__UpperCAmelCase )
model.save_pretrained(__UpperCAmelCase )
if push_to_hub:
print("""Pushing to the hub...""" )
model.push_to_hub(__UpperCAmelCase , organization="""nielsr""" )
if __name__ == "__main__":
__lowerCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://drive.google.com/u/1/uc?id=1tEhLyskjb755TJ65ptsrafUG2llSwQE1&export=download&confirm=t&uuid=aa3276eb-fb7e-482a-adec-dc7171df14c4',
type=str,
help=(
'URL of the original PyTorch checkpoint (on Google Drive) you\'d like to convert. Should be a direct'
' download link.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='/Users/nielsrogge/Documents/VideoMAE/Test',
type=str,
help='Path to the output PyTorch model directory.',
)
parser.add_argument('--model_name', default='videomae-base', type=str, help='Name of the model.')
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__lowerCamelCase = parser.parse_args()
convert_videomae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 96 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"uw-madison/mra-base-512-4": "https://huggingface.co/uw-madison/mra-base-512-4/resolve/main/config.json",
}
class SCREAMING_SNAKE_CASE ( a ):
"""simple docstring"""
a_ : Tuple ="mra"
def __init__( self : Union[str, Any] , _snake_case : List[str]=5_0265 , _snake_case : Union[str, Any]=768 , _snake_case : Union[str, Any]=12 , _snake_case : Any=12 , _snake_case : str=3072 , _snake_case : int="gelu" , _snake_case : Tuple=0.1 , _snake_case : int=0.1 , _snake_case : Tuple=512 , _snake_case : Optional[Any]=1 , _snake_case : Union[str, Any]=0.02 , _snake_case : List[Any]=1E-5 , _snake_case : Optional[Any]="absolute" , _snake_case : List[Any]=4 , _snake_case : str="full" , _snake_case : Union[str, Any]=0 , _snake_case : Any=0 , _snake_case : str=1 , _snake_case : Union[str, Any]=0 , _snake_case : Optional[Any]=2 , **_snake_case : List[Any] , ) -> List[Any]:
'''simple docstring'''
super().__init__(pad_token_id=_snake_case , bos_token_id=_snake_case , eos_token_id=_snake_case , **_snake_case )
a__ = vocab_size
a__ = max_position_embeddings
a__ = hidden_size
a__ = num_hidden_layers
a__ = num_attention_heads
a__ = intermediate_size
a__ = hidden_act
a__ = hidden_dropout_prob
a__ = attention_probs_dropout_prob
a__ = initializer_range
a__ = type_vocab_size
a__ = layer_norm_eps
a__ = position_embedding_type
a__ = block_per_row
a__ = approx_mode
a__ = initial_prior_first_n_blocks
a__ = initial_prior_diagonal_n_blocks
| 232 | 0 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import DetrConfig, MaskFormerConfig, SwinConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskFormerForInstanceSegmentation, MaskFormerModel
if is_vision_available():
from transformers import MaskFormerImageProcessor
if is_vision_available():
from PIL import Image
class _lowerCamelCase :
def __init__( self , lowerCAmelCase , lowerCAmelCase=2 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=10 , lowerCAmelCase=3 , lowerCAmelCase=32 * 4 , lowerCAmelCase=32 * 6 , lowerCAmelCase=4 , lowerCAmelCase=32 , ) -> Any:
SCREAMING_SNAKE_CASE__: List[str]= parent
SCREAMING_SNAKE_CASE__: str= batch_size
SCREAMING_SNAKE_CASE__: List[str]= is_training
SCREAMING_SNAKE_CASE__: Tuple= use_auxiliary_loss
SCREAMING_SNAKE_CASE__: Optional[Any]= num_queries
SCREAMING_SNAKE_CASE__: Optional[int]= num_channels
SCREAMING_SNAKE_CASE__: Union[str, Any]= min_size
SCREAMING_SNAKE_CASE__: Any= max_size
SCREAMING_SNAKE_CASE__: Any= num_labels
SCREAMING_SNAKE_CASE__: Union[str, Any]= mask_feature_size
def UpperCamelCase_ ( self ) -> Dict:
SCREAMING_SNAKE_CASE__: Any= floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= torch.ones([self.batch_size, self.min_size, self.max_size] , device=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=lowerCAmelCase ) > 0.5
).float()
SCREAMING_SNAKE_CASE__: List[Any]= (torch.rand((self.batch_size, self.num_labels) , device=lowerCAmelCase ) > 0.5).long()
SCREAMING_SNAKE_CASE__: Optional[int]= self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def UpperCamelCase_ ( self ) -> Any:
return MaskFormerConfig.from_backbone_and_decoder_configs(
backbone_config=SwinConfig(
depths=[1, 1, 1, 1] , ) , decoder_config=DetrConfig(
decoder_ffn_dim=128 , num_queries=self.num_queries , decoder_attention_heads=2 , d_model=self.mask_feature_size , ) , mask_feature_size=self.mask_feature_size , fpn_feature_size=self.mask_feature_size , num_channels=self.num_channels , num_labels=self.num_labels , )
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Union[str, Any]= self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__: str= {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase ) -> List[str]:
SCREAMING_SNAKE_CASE__: Tuple= output.encoder_hidden_states
SCREAMING_SNAKE_CASE__: str= output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE__: Tuple= output.transformer_decoder_hidden_states
self.parent.assertTrue(len(lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(lowerCAmelCase ) , config.decoder_config.decoder_layers )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ) -> Optional[Any]:
with torch.no_grad():
SCREAMING_SNAKE_CASE__: str= MaskFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__: Dict= model(pixel_values=lowerCAmelCase , pixel_mask=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= model(lowerCAmelCase , output_hidden_states=lowerCAmelCase )
# the correct shape of output.transformer_decoder_hidden_states ensure the correcteness of the
# encoder and pixel decoder
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.mask_feature_size) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(lowerCAmelCase , lowerCAmelCase )
def UpperCamelCase_ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) -> List[Any]:
SCREAMING_SNAKE_CASE__: List[Any]= MaskFormerForInstanceSegmentation(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
def comm_check_on_output(lowerCAmelCase ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__: str= model(pixel_values=lowerCAmelCase , pixel_mask=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Union[str, Any]= model(lowerCAmelCase )
comm_check_on_output(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= model(
pixel_values=lowerCAmelCase , pixel_mask=lowerCAmelCase , mask_labels=lowerCAmelCase , class_labels=lowerCAmelCase )
comm_check_on_output(lowerCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class _lowerCamelCase ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
__a = (MaskFormerModel, MaskFormerForInstanceSegmentation) if is_torch_available() else ()
__a = (
{"feature-extraction": MaskFormerModel, "image-segmentation": MaskFormerForInstanceSegmentation}
if is_torch_available()
else {}
)
__a = False
__a = False
__a = False
__a = False
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__: Tuple= MaskFormerModelTester(self )
SCREAMING_SNAKE_CASE__: Dict= ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
self.config_tester.run_common_tests()
def UpperCamelCase_ ( self ) -> List[str]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: List[Any]= self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCAmelCase , **lowerCAmelCase , output_hidden_states=lowerCAmelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: int= self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskformer_instance_segmentation_head_model(*lowerCAmelCase )
@unittest.skip(reason='''MaskFormer does not use inputs_embeds''' )
def UpperCamelCase_ ( self ) -> Optional[int]:
pass
@unittest.skip(reason='''MaskFormer does not have a get_input_embeddings method''' )
def UpperCamelCase_ ( self ) -> Union[str, Any]:
pass
@unittest.skip(reason='''MaskFormer is not a generative model''' )
def UpperCamelCase_ ( self ) -> int:
pass
@unittest.skip(reason='''MaskFormer does not use token embeddings''' )
def UpperCamelCase_ ( self ) -> Optional[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''MaskFormer has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def UpperCamelCase_ ( self ) -> Dict:
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def UpperCamelCase_ ( self ) -> Optional[int]:
pass
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[int]= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__: List[Any]= model_class(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__: List[str]= [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__: int= ['''pixel_values''']
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
@slow
def UpperCamelCase_ ( self ) -> List[Any]:
for model_name in ["facebook/maskformer-swin-small-coco"]:
SCREAMING_SNAKE_CASE__: Any= MaskFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Tuple= (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE__: int= {
'''pixel_values''': torch.randn((2, 3, *size) , device=lowerCAmelCase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=lowerCAmelCase ),
'''class_labels''': torch.zeros(2 , 10 , device=lowerCAmelCase ).long(),
}
SCREAMING_SNAKE_CASE__: Any= MaskFormerForInstanceSegmentation(MaskFormerConfig() ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= model(**lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
def UpperCamelCase_ ( self ) -> Optional[int]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: int= self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskformer_model(lowerCAmelCase , **lowerCAmelCase , output_hidden_states=lowerCAmelCase )
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[int]= self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__: str= model_class(lowerCAmelCase ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Dict= model(**lowerCAmelCase , output_attentions=lowerCAmelCase )
self.assertTrue(outputs.attentions is not None )
def UpperCamelCase_ ( self ) -> List[str]:
if not self.model_tester.is_training:
return
# only MaskFormerForInstanceSegmentation has the loss
SCREAMING_SNAKE_CASE__: Union[str, Any]= self.all_model_classes[1]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Dict= self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__: List[Any]= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__: Union[str, Any]= model(lowerCAmelCase , mask_labels=lowerCAmelCase , class_labels=lowerCAmelCase ).loss
loss.backward()
def UpperCamelCase_ ( self ) -> Optional[int]:
# only MaskFormerForInstanceSegmentation has the loss
SCREAMING_SNAKE_CASE__: Optional[int]= self.all_model_classes[1]
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Dict= self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__: Dict= True
SCREAMING_SNAKE_CASE__: Optional[int]= True
SCREAMING_SNAKE_CASE__: Union[str, Any]= model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__: int= model(lowerCAmelCase , mask_labels=lowerCAmelCase , class_labels=lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Optional[int]= outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__: List[str]= outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
# we requires_grad=True in inputs_embeds (line 2152), the original implementation don't
SCREAMING_SNAKE_CASE__: List[str]= outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__: Any= outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=lowerCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowercase_ : int = 1E-4
def A__ ( ):
SCREAMING_SNAKE_CASE__: Tuple= Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class _lowerCamelCase ( unittest.TestCase ):
@cached_property
def UpperCamelCase_ ( self ) -> List[Any]:
return (
MaskFormerImageProcessor.from_pretrained('''facebook/maskformer-swin-small-coco''' )
if is_vision_available()
else None
)
def UpperCamelCase_ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE__: str= MaskFormerModel.from_pretrained('''facebook/maskformer-swin-small-coco''' ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: Any= self.default_image_processor
SCREAMING_SNAKE_CASE__: Optional[Any]= prepare_img()
SCREAMING_SNAKE_CASE__: Dict= image_processor(lowerCAmelCase , return_tensors='''pt''' ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: str= inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__: List[Any]= model(**lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= torch.tensor(
[[-0.0482, 0.9228, 0.4951], [-0.2547, 0.8017, 0.8527], [-0.0069, 0.3385, -0.0089]] ).to(lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__: Any= torch.tensor(
[[-0.8422, -0.8434, -0.9718], [-1.0144, -0.5565, -0.4195], [-1.0038, -0.4484, -0.1961]] ).to(lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__: Optional[Any]= torch.tensor(
[[0.2852, -0.0159, 0.9735], [0.6254, 0.1858, 0.8529], [-0.0680, -0.4116, 1.8413]] ).to(lowerCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
def UpperCamelCase_ ( self ) -> List[Any]:
SCREAMING_SNAKE_CASE__: List[Any]= (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(lowerCAmelCase )
.eval()
)
SCREAMING_SNAKE_CASE__: List[str]= self.default_image_processor
SCREAMING_SNAKE_CASE__: str= prepare_img()
SCREAMING_SNAKE_CASE__: Any= image_processor(lowerCAmelCase , return_tensors='''pt''' ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[str]= inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__: Tuple= model(**lowerCAmelCase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__: Tuple= outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE__: Any= [
[-1.3737124, -1.7724937, -1.9364233],
[-1.5977281, -1.9867939, -2.1523695],
[-1.5795398, -1.9269832, -2.093942],
]
SCREAMING_SNAKE_CASE__: Any= torch.tensor(lowerCAmelCase ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__: str= outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__: List[str]= torch.tensor(
[
[1.6512e00, -5.2572e00, -3.3519e00],
[3.6169e-02, -5.9025e00, -2.9313e00],
[1.0766e-04, -7.7630e00, -5.1263e00],
] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
def UpperCamelCase_ ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__: Dict= (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-resnet101-coco-stuff''' )
.to(lowerCAmelCase )
.eval()
)
SCREAMING_SNAKE_CASE__: int= self.default_image_processor
SCREAMING_SNAKE_CASE__: Optional[int]= prepare_img()
SCREAMING_SNAKE_CASE__: str= image_processor(lowerCAmelCase , return_tensors='''pt''' ).to(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: List[Any]= inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(lowerCAmelCase , (1, 3, 800, 1088) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__: int= model(**lowerCAmelCase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__: int= outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.decoder_config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) , )
SCREAMING_SNAKE_CASE__: str= [[-0.9046, -2.6366, -4.6062], [-3.4179, -5.7890, -8.8057], [-4.9179, -7.6560, -10.7711]]
SCREAMING_SNAKE_CASE__: Optional[int]= torch.tensor(lowerCAmelCase ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__: Union[str, Any]= outputs.class_queries_logits
self.assertEqual(
class_queries_logits.shape , (1, model.config.decoder_config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__: List[str]= torch.tensor(
[[4.7188, -3.2585, -2.8857], [6.6871, -2.9181, -1.2487], [7.2449, -2.2764, -2.1874]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , lowerCAmelCase , atol=lowerCAmelCase ) )
def UpperCamelCase_ ( self ) -> str:
SCREAMING_SNAKE_CASE__: List[str]= (
MaskFormerForInstanceSegmentation.from_pretrained('''facebook/maskformer-swin-small-coco''' )
.to(lowerCAmelCase )
.eval()
)
SCREAMING_SNAKE_CASE__: Any= self.default_image_processor
SCREAMING_SNAKE_CASE__: List[Any]= image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__: Dict= inputs['''pixel_values'''].to(lowerCAmelCase )
SCREAMING_SNAKE_CASE__: int= [el.to(lowerCAmelCase ) for el in inputs['''mask_labels''']]
SCREAMING_SNAKE_CASE__: int= [el.to(lowerCAmelCase ) for el in inputs['''class_labels''']]
with torch.no_grad():
SCREAMING_SNAKE_CASE__: List[str]= model(**lowerCAmelCase )
self.assertTrue(outputs.loss is not None )
| 107 | def A__ ( snake_case_ : str ):
if not head:
return True
# split the list to two parts
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Optional[int]= head.next, head
while fast and fast.next:
SCREAMING_SNAKE_CASE__: Dict= fast.next.next
SCREAMING_SNAKE_CASE__: Union[str, Any]= slow.next
SCREAMING_SNAKE_CASE__: Union[str, Any]= slow.next
SCREAMING_SNAKE_CASE__: Union[str, Any]= None # Don't forget here! But forget still works!
# reverse the second part
SCREAMING_SNAKE_CASE__: Optional[int]= None
while second:
SCREAMING_SNAKE_CASE__: Any= second.next
SCREAMING_SNAKE_CASE__: int= node
SCREAMING_SNAKE_CASE__: Optional[Any]= second
SCREAMING_SNAKE_CASE__: Any= nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
SCREAMING_SNAKE_CASE__: Tuple= node.next
SCREAMING_SNAKE_CASE__: Optional[int]= head.next
return True
def A__ ( snake_case_ : Optional[Any] ):
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
SCREAMING_SNAKE_CASE__: List[Any]= head
while fast and fast.next:
SCREAMING_SNAKE_CASE__, SCREAMING_SNAKE_CASE__: Any= fast.next.next, slow.next
# 2. Push the second half into the stack
SCREAMING_SNAKE_CASE__: Optional[Any]= [slow.val]
while slow.next:
SCREAMING_SNAKE_CASE__: Optional[int]= slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
SCREAMING_SNAKE_CASE__: Tuple= cur.next
return True
def A__ ( snake_case_ : Any ):
if not head or not head.next:
return True
SCREAMING_SNAKE_CASE__: Optional[int]= {}
SCREAMING_SNAKE_CASE__: Union[str, Any]= 0
while head:
if head.val in d:
d[head.val].append(snake_case_ )
else:
SCREAMING_SNAKE_CASE__: Optional[int]= [pos]
SCREAMING_SNAKE_CASE__: Dict= head.next
pos += 1
SCREAMING_SNAKE_CASE__: Dict= pos - 1
SCREAMING_SNAKE_CASE__: str= 0
for v in d.values():
if len(snake_case_ ) % 2 != 0:
middle += 1
else:
SCREAMING_SNAKE_CASE__: List[Any]= 0
for i in range(0 , len(snake_case_ ) ):
if v[i] + v[len(snake_case_ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 107 | 1 |
from __future__ import annotations
from dataclasses import dataclass
@dataclass
class _lowerCamelCase :
__a = 42
__a = None
__a = None
def A__ ( snake_case_ : TreeNode | None ):
# Validation
def is_valid_tree(snake_case_ : TreeNode | None ) -> bool:
if node is None:
return True
if not isinstance(snake_case_ , snake_case_ ):
return False
try:
float(node.data )
except (TypeError, ValueError):
return False
return is_valid_tree(node.left ) and is_valid_tree(node.right )
if not is_valid_tree(snake_case_ ):
raise ValueError(
'''Each node should be type of TreeNode and data should be float.''' )
def is_binary_search_tree_recursive_check(
snake_case_ : TreeNode | None , snake_case_ : float , snake_case_ : float ) -> bool:
if node is None:
return True
return (
left_bound < node.data < right_bound
and is_binary_search_tree_recursive_check(node.left , snake_case_ , node.data )
and is_binary_search_tree_recursive_check(
node.right , node.data , snake_case_ )
)
return is_binary_search_tree_recursive_check(snake_case_ , -float('''inf''' ) , float('''inf''' ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 64 |
'''simple docstring'''
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _lowerCAmelCase ( unittest.TestCase ):
def _a (self ):
A_ : Union[str, Any] = tempfile.mkdtemp()
A_ : List[Any] = BlipImageProcessor()
A_ : Optional[int] = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
A_ : Any = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
A_ : Dict = InstructBlipProcessor(lowercase , lowercase , lowercase )
processor.save_pretrained(self.tmpdirname )
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).tokenizer
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).image_processor
def _a (self , **lowercase ):
return AutoProcessor.from_pretrained(self.tmpdirname , **lowercase ).qformer_tokenizer
def _a (self ):
shutil.rmtree(self.tmpdirname )
def _a (self ):
A_ : List[str] = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
A_ : Optional[Any] = [Image.fromarray(np.moveaxis(lowercase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _a (self ):
A_ : str = InstructBlipProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() , qformer_tokenizer=self.get_qformer_tokenizer() , )
processor.save_pretrained(self.tmpdirname )
A_ : Tuple = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A_ : Optional[Any] = self.get_image_processor(do_normalize=lowercase , padding_value=1.0 )
A_ : str = InstructBlipProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowercase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowercase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowercase )
self.assertIsInstance(processor.qformer_tokenizer , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : List[str] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = self.prepare_image_inputs()
A_ : Union[str, Any] = image_processor(lowercase , return_tensors="""np""" )
A_ : Dict = processor(images=lowercase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _a (self ):
A_ : List[Any] = self.get_image_processor()
A_ : Optional[Any] = self.get_tokenizer()
A_ : Any = self.get_qformer_tokenizer()
A_ : List[str] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : str = """lower newer"""
A_ : List[Any] = processor(text=lowercase )
A_ : Optional[int] = tokenizer(lowercase , return_token_type_ids=lowercase )
A_ : List[Any] = qformer_tokenizer(lowercase , return_token_type_ids=lowercase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] , encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] , encoded_processor["""qformer_""" + key] )
def _a (self ):
A_ : int = self.get_image_processor()
A_ : Union[str, Any] = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Any = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Optional[int] = """lower newer"""
A_ : Optional[int] = self.prepare_image_inputs()
A_ : Tuple = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , )
# test if it raises when no input is passed
with pytest.raises(lowercase ):
processor()
def _a (self ):
A_ : Dict = self.get_image_processor()
A_ : str = self.get_tokenizer()
A_ : Optional[int] = self.get_qformer_tokenizer()
A_ : int = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A_ : Optional[int] = processor.batch_decode(lowercase )
A_ : Dict = tokenizer.batch_decode(lowercase )
self.assertListEqual(lowercase , lowercase )
def _a (self ):
A_ : Any = self.get_image_processor()
A_ : Dict = self.get_tokenizer()
A_ : Union[str, Any] = self.get_qformer_tokenizer()
A_ : Optional[int] = InstructBlipProcessor(
tokenizer=lowercase , image_processor=lowercase , qformer_tokenizer=lowercase )
A_ : List[Any] = """lower newer"""
A_ : Optional[Any] = self.prepare_image_inputs()
A_ : Any = processor(text=lowercase , images=lowercase )
self.assertListEqual(
list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] , ) | 667 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
__lowerCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def lowercase_ ( self ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[int] = (3, 32, 128)
__UpperCAmelCase: Union[str, Any] = tempfile.mkdtemp()
# fmt: off
__UpperCAmelCase: List[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
__UpperCAmelCase: Optional[int] = dict(zip(snake_case_ , range(len(snake_case_ ) ) ) )
__UpperCAmelCase: int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(snake_case_ ) + """\n""" )
__UpperCAmelCase: List[str] = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 128},
}
__UpperCAmelCase: List[str] = os.path.join(self.tmpdirname , snake_case_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(snake_case_ , snake_case_ )
def lowercase_ ( self , **snake_case_ ):
'''simple docstring'''
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **snake_case_ )
def lowercase_ ( self , **snake_case_ ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **snake_case_ )
def lowercase_ ( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: List[Any] = np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )
__UpperCAmelCase: Union[str, Any] = Image.fromarray(np.moveaxis(snake_case_ , 0 , -1 ) )
return image_input
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Dict = self.get_tokenizer()
__UpperCAmelCase: str = self.get_image_processor()
__UpperCAmelCase: List[Any] = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase: Tuple = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=snake_case_ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: str = self.get_tokenizer()
__UpperCAmelCase: int = self.get_image_processor()
__UpperCAmelCase: List[Any] = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
processor.save_pretrained(self.tmpdirname )
__UpperCAmelCase: Union[str, Any] = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
__UpperCAmelCase: Optional[Any] = self.get_image_processor(do_normalize=snake_case_ , padding_value=1.0 )
__UpperCAmelCase: Dict = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=snake_case_ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , snake_case_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case_ )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: List[Any] = self.get_image_processor()
__UpperCAmelCase: Any = self.get_tokenizer()
__UpperCAmelCase: Optional[Any] = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
__UpperCAmelCase: Tuple = self.prepare_image_inputs()
__UpperCAmelCase: Optional[Any] = image_processor(snake_case_ , return_tensors="""np""" )
__UpperCAmelCase: Optional[int] = processor(images=snake_case_ , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Any = self.get_image_processor()
__UpperCAmelCase: Any = self.get_tokenizer()
__UpperCAmelCase: Optional[Any] = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
__UpperCAmelCase: Optional[int] = """test"""
__UpperCAmelCase: Optional[int] = processor(text=snake_case_ )
__UpperCAmelCase: Any = tokenizer(snake_case_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: str = self.get_image_processor()
__UpperCAmelCase: Any = self.get_tokenizer()
__UpperCAmelCase: Optional[int] = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
__UpperCAmelCase: Tuple = """test"""
__UpperCAmelCase: Tuple = self.prepare_image_inputs()
__UpperCAmelCase: Optional[Any] = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(snake_case_ ):
processor()
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Optional[Any] = self.get_image_processor()
__UpperCAmelCase: Tuple = self.get_tokenizer()
__UpperCAmelCase: Union[str, Any] = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
__UpperCAmelCase: Optional[int] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
__UpperCAmelCase: str = processor.char_decode(snake_case_ )
__UpperCAmelCase: Optional[Any] = tokenizer.batch_decode(snake_case_ )
__UpperCAmelCase: List[Any] = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(snake_case_ , snake_case_ )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: Union[str, Any] = self.get_image_processor()
__UpperCAmelCase: Dict = self.get_tokenizer()
__UpperCAmelCase: Tuple = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
__UpperCAmelCase: List[Any] = None
__UpperCAmelCase: List[Any] = self.prepare_image_inputs()
__UpperCAmelCase: List[str] = processor(text=snake_case_ , images=snake_case_ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def lowercase_ ( self ):
'''simple docstring'''
__UpperCAmelCase: str = self.get_image_processor()
__UpperCAmelCase: Dict = self.get_tokenizer()
__UpperCAmelCase: Dict = MgpstrProcessor(tokenizer=snake_case_ , image_processor=snake_case_ )
__UpperCAmelCase: Union[str, Any] = torch.randn(1 , 27 , 38 )
__UpperCAmelCase: List[str] = torch.randn(1 , 27 , 5_0257 )
__UpperCAmelCase: Union[str, Any] = torch.randn(1 , 27 , 3_0522 )
__UpperCAmelCase: Tuple = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] ) | 466 | '''simple docstring'''
def UpperCamelCase__ ( _lowercase : list ) -> list:
if len(_lowercase ) <= 1:
return lst
__UpperCAmelCase: List[str] = 1
while i < len(_lowercase ):
if lst[i - 1] <= lst[i]:
i += 1
else:
__UpperCAmelCase, __UpperCAmelCase: Tuple = lst[i], lst[i - 1]
i -= 1
if i == 0:
__UpperCAmelCase: List[str] = 1
return lst
if __name__ == "__main__":
SCREAMING_SNAKE_CASE_ = input('Enter numbers separated by a comma:\n').strip()
SCREAMING_SNAKE_CASE_ = [int(item) for item in user_input.split(',')]
print(gnome_sort(unsorted)) | 466 | 1 |
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = '''https://openaipublic.azureedge.net/jukebox/models/'''
lowerCAmelCase__ = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def _A ( A__ ):
"""simple docstring"""
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 10:
__lowercase = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 10:
__lowercase = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 10:
__lowercase = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 10:
__lowercase = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
__lowercase = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
__lowercase = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
__lowercase = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
__lowercase = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def _A ( A__ , A__ , A__ , A__ ):
"""simple docstring"""
__lowercase = {}
import re
__lowercase = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__lowercase = re.compile(
R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__lowercase = re.compile(R'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__lowercase = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
__lowercase = re.compile(
R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__lowercase = re.compile(R'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
__lowercase = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
__lowercase = re.compile(
R'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
__lowercase = re.compile(R'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(A__ ):
__lowercase = re_encoder_block_conv_in.match(A__ )
__lowercase = regex_match.groups()
__lowercase = int(groups[2] ) * 2 + int(groups[3] )
__lowercase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
__lowercase = re_encoder_block_conv_in.sub(A__ , A__ )
elif re_encoder_block_resnet.fullmatch(A__ ):
__lowercase = re_encoder_block_resnet.match(A__ )
__lowercase = regex_match.groups()
__lowercase = int(groups[2] ) * 2 + int(groups[3] )
__lowercase = {'''1''': 1, '''3''': 2}[groups[-2]]
__lowercase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
__lowercase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
__lowercase = prefix + resnet_block
__lowercase = re_encoder_block_resnet.sub(A__ , A__ )
elif re_encoder_block_proj_out.fullmatch(A__ ):
__lowercase = re_encoder_block_proj_out.match(A__ )
__lowercase = regex_match.groups()
__lowercase = F"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
__lowercase = re_encoder_block_proj_out.sub(A__ , A__ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(A__ ):
__lowercase = re_decoder_block_conv_out.match(A__ )
__lowercase = regex_match.groups()
__lowercase = int(groups[2] ) * 2 + int(groups[3] ) - 2
__lowercase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
__lowercase = re_decoder_block_conv_out.sub(A__ , A__ )
elif re_decoder_block_resnet.fullmatch(A__ ):
__lowercase = re_decoder_block_resnet.match(A__ )
__lowercase = regex_match.groups()
__lowercase = int(groups[2] ) * 2 + int(groups[3] ) - 2
__lowercase = {'''1''': 1, '''3''': 2}[groups[-2]]
__lowercase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
__lowercase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
__lowercase = prefix + resnet_block
__lowercase = re_decoder_block_resnet.sub(A__ , A__ )
elif re_decoder_block_proj_in.fullmatch(A__ ):
__lowercase = re_decoder_block_proj_in.match(A__ )
__lowercase = regex_match.groups()
__lowercase = F"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
__lowercase = re_decoder_block_proj_in.sub(A__ , A__ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(A__ ):
__lowercase = re_prior_cond_conv_out.match(A__ )
__lowercase = regex_match.groups()
__lowercase = int(groups[1] ) * 2 + int(groups[2] ) - 2
__lowercase = F"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
__lowercase = re_prior_cond_conv_out.sub(A__ , A__ )
elif re_prior_cond_resnet.fullmatch(A__ ):
__lowercase = re_prior_cond_resnet.match(A__ )
__lowercase = regex_match.groups()
__lowercase = int(groups[1] ) * 2 + int(groups[2] ) - 2
__lowercase = {'''1''': 1, '''3''': 2}[groups[-2]]
__lowercase = F"conditioner_blocks.upsampler.upsample_block.{block_index}."
__lowercase = F"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
__lowercase = prefix + resnet_block
__lowercase = re_prior_cond_resnet.sub(A__ , A__ )
elif re_prior_cond_proj_in.fullmatch(A__ ):
__lowercase = re_prior_cond_proj_in.match(A__ )
__lowercase = regex_match.groups()
__lowercase = F"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
__lowercase = re_prior_cond_proj_in.sub(A__ , A__ )
# keep original key
else:
__lowercase = original_key
__lowercase = replace_key(A__ )
if F"{key_prefix}.{key}" not in model_state_dict or key is None:
print(F"failed converting {original_key} to {key}, does not match" )
# handle missmatched shape
elif value.shape != model_state_dict[F"{key_prefix}.{key}"].shape:
__lowercase = model_state_dict[F"{key_prefix}.{key}"]
print(F"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match" )
__lowercase = original_key
__lowercase = original_key
__lowercase = value
return new_dict
@torch.no_grad()
def _A ( A__=None , A__=None ):
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" ):
__lowercase = requests.get(F"{PREFIX}{file}" , allow_redirects=A__ )
os.makedirs(F"{pytorch_dump_folder_path}/" , exist_ok=A__ )
open(F"{pytorch_dump_folder_path}/{file.split('/' )[-1]}" , '''wb''' ).write(r.content )
__lowercase = MODEL_MAPPING[model_name.split('''/''' )[-1]]
__lowercase = JukeboxConfig.from_pretrained(A__ )
__lowercase = JukeboxModel(A__ )
__lowercase = []
__lowercase = {}
for i, dict_name in enumerate(A__ ):
__lowercase = torch.load(F"{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}" )['''model''']
__lowercase = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
__lowercase = old_dic[k]
elif k.endswith('''.w''' ):
__lowercase = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
__lowercase = old_dic[k]
else:
__lowercase = old_dic[k]
__lowercase = '''vqvae''' if i == 0 else F"priors.{3 - i}"
__lowercase = fix_jukebox_keys(A__ , model.state_dict() , A__ , A__ )
weight_dict.append(A__ )
__lowercase = weight_dict.pop(0 )
model.vqvae.load_state_dict(A__ )
for i in range(len(A__ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(A__ ).mkdir(exist_ok=A__ )
with open(F"{pytorch_dump_folder_path}/mapping.json" , '''w''' ) as txtfile:
json.dump(A__ , A__ )
print(F"Saving model {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(A__ )
return weight_dict
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
lowerCAmelCase__ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 41 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
__lowerCAmelCase = datasets.utils.logging.get_logger(__name__)
__lowerCAmelCase = ["""names""", """prefix"""]
__lowerCAmelCase = ["""warn_bad_lines""", """error_bad_lines""", """mangle_dupe_cols"""]
__lowerCAmelCase = ["""encoding_errors""", """on_bad_lines"""]
__lowerCAmelCase = ["""date_format"""]
@dataclass
class lowerCamelCase_ ( datasets.BuilderConfig ):
__lowercase : str = ","
__lowercase : Optional[str] = None
__lowercase : Optional[Union[int, List[int], str]] = "infer"
__lowercase : Optional[List[str]] = None
__lowercase : Optional[List[str]] = None
__lowercase : Optional[Union[int, str, List[int], List[str]]] = None
__lowercase : Optional[Union[List[int], List[str]]] = None
__lowercase : Optional[str] = None
__lowercase : bool = True
__lowercase : Optional[Literal["c", "python", "pyarrow"]] = None
__lowercase : Dict[Union[int, str], Callable[[Any], Any]] = None
__lowercase : Optional[list] = None
__lowercase : Optional[list] = None
__lowercase : bool = False
__lowercase : Optional[Union[int, List[int]]] = None
__lowercase : Optional[int] = None
__lowercase : Optional[Union[str, List[str]]] = None
__lowercase : bool = True
__lowercase : bool = True
__lowercase : bool = False
__lowercase : bool = True
__lowercase : Optional[str] = None
__lowercase : str = "."
__lowercase : Optional[str] = None
__lowercase : str = '"'
__lowercase : int = 0
__lowercase : Optional[str] = None
__lowercase : Optional[str] = None
__lowercase : Optional[str] = None
__lowercase : Optional[str] = None
__lowercase : bool = True
__lowercase : bool = True
__lowercase : int = 0
__lowercase : bool = True
__lowercase : bool = False
__lowercase : Optional[str] = None
__lowercase : int = 10000
__lowercase : Optional[datasets.Features] = None
__lowercase : Optional[str] = "strict"
__lowercase : Literal["error", "warn", "skip"] = "error"
__lowercase : Optional[str] = None
def lowercase ( self ) -> Any:
"""simple docstring"""
if self.delimiter is not None:
_UpperCamelCase = self.delimiter
if self.column_names is not None:
_UpperCamelCase = self.column_names
@property
def lowercase ( self ) -> int:
"""simple docstring"""
_UpperCamelCase = {
"sep": self.sep,
"header": self.header,
"names": self.names,
"index_col": self.index_col,
"usecols": self.usecols,
"prefix": self.prefix,
"mangle_dupe_cols": self.mangle_dupe_cols,
"engine": self.engine,
"converters": self.converters,
"true_values": self.true_values,
"false_values": self.false_values,
"skipinitialspace": self.skipinitialspace,
"skiprows": self.skiprows,
"nrows": self.nrows,
"na_values": self.na_values,
"keep_default_na": self.keep_default_na,
"na_filter": self.na_filter,
"verbose": self.verbose,
"skip_blank_lines": self.skip_blank_lines,
"thousands": self.thousands,
"decimal": self.decimal,
"lineterminator": self.lineterminator,
"quotechar": self.quotechar,
"quoting": self.quoting,
"escapechar": self.escapechar,
"comment": self.comment,
"encoding": self.encoding,
"dialect": self.dialect,
"error_bad_lines": self.error_bad_lines,
"warn_bad_lines": self.warn_bad_lines,
"skipfooter": self.skipfooter,
"doublequote": self.doublequote,
"memory_map": self.memory_map,
"float_precision": self.float_precision,
"chunksize": self.chunksize,
"encoding_errors": self.encoding_errors,
"on_bad_lines": self.on_bad_lines,
"date_format": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig() , lowerCamelCase_ ):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
class lowerCamelCase_ ( datasets.ArrowBasedBuilder ):
__lowercase : Optional[int] = CsvConfig
def lowercase ( self ) -> List[Any]:
"""simple docstring"""
return datasets.DatasetInfo(features=self.config.features )
def lowercase ( self , lowerCamelCase_ ) -> Dict:
"""simple docstring"""
if not self.config.data_files:
raise ValueError(f'''At least one data file must be specified, but got data_files={self.config.data_files}''' )
_UpperCamelCase = dl_manager.download_and_extract(self.config.data_files )
if isinstance(lowerCamelCase_ , (str, list, tuple) ):
_UpperCamelCase = data_files
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_UpperCamelCase = [files]
_UpperCamelCase = [dl_manager.iter_files(lowerCamelCase_ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
_UpperCamelCase = []
for split_name, files in data_files.items():
if isinstance(lowerCamelCase_ , lowerCamelCase_ ):
_UpperCamelCase = [files]
_UpperCamelCase = [dl_manager.iter_files(lowerCamelCase_ ) for file in files]
splits.append(datasets.SplitGenerator(name=lowerCamelCase_ , gen_kwargs={"files": files} ) )
return splits
def lowercase ( self , lowerCamelCase_ ) -> pa.Table:
"""simple docstring"""
if self.config.features is not None:
_UpperCamelCase = self.config.features.arrow_schema
if all(not require_storage_cast(lowerCamelCase_ ) for feature in self.config.features.values() ):
# cheaper cast
_UpperCamelCase = pa.Table.from_arrays([pa_table[field.name] for field in schema] , schema=lowerCamelCase_ )
else:
# more expensive cast; allows str <-> int/float or str to Audio for example
_UpperCamelCase = table_cast(lowerCamelCase_ , lowerCamelCase_ )
return pa_table
def lowercase ( self , lowerCamelCase_ ) -> Optional[Any]:
"""simple docstring"""
_UpperCamelCase = self.config.features.arrow_schema if self.config.features else None
# dtype allows reading an int column as str
_UpperCamelCase = (
{
name: dtype.to_pandas_dtype() if not require_storage_cast(lowerCamelCase_ ) else object
for name, dtype, feature in zip(schema.names , schema.types , self.config.features.values() )
}
if schema is not None
else None
)
for file_idx, file in enumerate(itertools.chain.from_iterable(lowerCamelCase_ ) ):
_UpperCamelCase = pd.read_csv(lowerCamelCase_ , iterator=lowerCamelCase_ , dtype=lowerCamelCase_ , **self.config.pd_read_csv_kwargs )
try:
for batch_idx, df in enumerate(lowerCamelCase_ ):
_UpperCamelCase = pa.Table.from_pandas(lowerCamelCase_ )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield (file_idx, batch_idx), self._cast_table(lowerCamelCase_ )
except ValueError as e:
logger.error(f'''Failed to read file \'{file}\' with error {type(lowerCamelCase_ )}: {e}''' )
raise
| 147 | 0 |
'''simple docstring'''
import gc
import unittest
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
PriorTransformer,
StableUnCLIPPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.testing_utils import enable_full_determinism, load_numpy, require_torch_gpu, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class UpperCAmelCase_ ( lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = StableUnCLIPPipeline
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_BATCH_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
__SCREAMING_SNAKE_CASE = TEXT_TO_IMAGE_IMAGE_PARAMS
# TODO(will) Expected attn_bias.stride(1) == 0 to be true, but got false
__SCREAMING_SNAKE_CASE = False
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = 32
UpperCamelCase : List[str] = embedder_hidden_size
# prior components
torch.manual_seed(0 )
UpperCamelCase : Optional[int] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
UpperCamelCase : Tuple = CLIPTextModelWithProjection(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=lowerCamelCase , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
UpperCamelCase : Dict = PriorTransformer(
num_attention_heads=2 , attention_head_dim=12 , embedding_dim=lowerCamelCase , num_layers=1 , )
torch.manual_seed(0 )
UpperCamelCase : str = DDPMScheduler(
variance_type="fixed_small_log" , prediction_type="sample" , num_train_timesteps=10_00 , clip_sample=lowerCamelCase , clip_sample_range=5.0 , beta_schedule="squaredcos_cap_v2" , )
# regular denoising components
torch.manual_seed(0 )
UpperCamelCase : Dict = StableUnCLIPImageNormalizer(embedding_dim=lowerCamelCase )
UpperCamelCase : Tuple = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
UpperCamelCase : Any = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
UpperCamelCase : Optional[Any] = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCamelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , ) )
torch.manual_seed(0 )
UpperCamelCase : Optional[Any] = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCamelCase , layers_per_block=1 , upcast_attention=lowerCamelCase , use_linear_projection=lowerCamelCase , )
torch.manual_seed(0 )
UpperCamelCase : Tuple = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCamelCase , steps_offset=1 , )
torch.manual_seed(0 )
UpperCamelCase : Union[str, Any] = AutoencoderKL()
UpperCamelCase : Dict = {
# prior components
"prior_tokenizer": prior_tokenizer,
"prior_text_encoder": prior_text_encoder,
"prior": prior,
"prior_scheduler": prior_scheduler,
# image noising components
"image_normalizer": image_normalizer,
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder,
"unet": unet,
"scheduler": scheduler,
"vae": vae,
}
return components
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase=0 ) -> int:
'''simple docstring'''
if str(lowerCamelCase ).startswith("mps" ):
UpperCamelCase : Tuple = torch.manual_seed(lowerCamelCase )
else:
UpperCamelCase : str = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
UpperCamelCase : Any = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"prior_num_inference_steps": 2,
"output_type": "numpy",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = torch_device == "cpu"
self._test_attention_slicing_forward_pass(test_max_difference=lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
'''simple docstring'''
UpperCamelCase : List[str] = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCamelCase )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
UpperCamelCase : List[Any] = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_anime_turtle_fp16.npy" )
UpperCamelCase : Optional[int] = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCamelCase : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCamelCase : Tuple = pipe("anime turle" , generator=lowerCamelCase , output_type="np" )
UpperCamelCase : Union[str, Any] = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCamelCase : Any = StableUnCLIPPipeline.from_pretrained("fusing/stable-unclip-2-1-l" , torch_dtype=torch.floataa )
UpperCamelCase : Dict = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCamelCase : Optional[Any] = pipe(
"anime turtle" , prior_num_inference_steps=2 , num_inference_steps=2 , output_type="np" , )
UpperCamelCase : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 435 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyInpaintPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class UpperCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = KandinskyInpaintPipeline
__SCREAMING_SNAKE_CASE = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''', '''mask_image''']
__SCREAMING_SNAKE_CASE = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
'''mask_image''',
]
__SCREAMING_SNAKE_CASE = [
'''generator''',
'''height''',
'''width''',
'''latents''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
__SCREAMING_SNAKE_CASE = False
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return 32
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Union[str, Any]:
'''simple docstring'''
return self.time_input_dim
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
return self.time_input_dim * 4
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
return 1_00
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> int:
'''simple docstring'''
UpperCamelCase : Optional[int] = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base" )
return tokenizer
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : List[str] = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
UpperCamelCase : str = MultilingualCLIP(lowerCamelCase )
UpperCamelCase : Dict = text_encoder.eval()
return text_encoder
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : int = {
"in_channels": 9,
# Out channels is double in channels because predicts mean and variance
"out_channels": 8,
"addition_embed_type": "text_image",
"down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
"up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
"mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"layers_per_block": 1,
"encoder_hid_dim": self.text_embedder_hidden_size,
"encoder_hid_dim_type": "text_image_proj",
"cross_attention_dim": self.cross_attention_dim,
"attention_head_dim": 4,
"resnet_time_scale_shift": "scale_shift",
"class_embed_type": None,
}
UpperCamelCase : Dict = UNetaDConditionModel(**lowerCamelCase )
return model
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
'''simple docstring'''
torch.manual_seed(0 )
UpperCamelCase : Any = VQModel(**self.dummy_movq_kwargs )
return model
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.dummy_text_encoder
UpperCamelCase : Dict = self.dummy_tokenizer
UpperCamelCase : List[str] = self.dummy_unet
UpperCamelCase : int = self.dummy_movq
UpperCamelCase : Dict = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule="linear" , beta_start=0.00085 , beta_end=0.012 , clip_sample=lowerCamelCase , set_alpha_to_one=lowerCamelCase , steps_offset=1 , prediction_type="epsilon" , thresholding=lowerCamelCase , )
UpperCamelCase : Union[str, Any] = {
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"unet": unet,
"scheduler": scheduler,
"movq": movq,
}
return components
def SCREAMING_SNAKE_CASE__ ( self , lowerCamelCase , lowerCamelCase=0 ) -> str:
'''simple docstring'''
UpperCamelCase : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
UpperCamelCase : Tuple = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(lowerCamelCase )
# create init_image
UpperCamelCase : Any = floats_tensor((1, 3, 64, 64) , rng=random.Random(lowerCamelCase ) ).to(lowerCamelCase )
UpperCamelCase : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCamelCase : Dict = Image.fromarray(np.uinta(lowerCamelCase ) ).convert("RGB" ).resize((2_56, 2_56) )
# create mask
UpperCamelCase : int = np.ones((64, 64) , dtype=np.floataa )
UpperCamelCase : List[Any] = 0
if str(lowerCamelCase ).startswith("mps" ):
UpperCamelCase : Tuple = torch.manual_seed(lowerCamelCase )
else:
UpperCamelCase : Dict = torch.Generator(device=lowerCamelCase ).manual_seed(lowerCamelCase )
UpperCamelCase : Any = {
"prompt": "horse",
"image": init_image,
"mask_image": mask,
"image_embeds": image_embeds,
"negative_image_embeds": negative_image_embeds,
"generator": generator,
"height": 64,
"width": 64,
"num_inference_steps": 2,
"guidance_scale": 4.0,
"output_type": "np",
}
return inputs
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[int]:
'''simple docstring'''
UpperCamelCase : int = "cpu"
UpperCamelCase : Dict = self.get_dummy_components()
UpperCamelCase : Optional[int] = self.pipeline_class(**lowerCamelCase )
UpperCamelCase : List[Any] = pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
UpperCamelCase : Dict = pipe(**self.get_dummy_inputs(lowerCamelCase ) )
UpperCamelCase : Tuple = output.images
UpperCamelCase : Tuple = pipe(
**self.get_dummy_inputs(lowerCamelCase ) , return_dict=lowerCamelCase , )[0]
UpperCamelCase : List[Any] = image[0, -3:, -3:, -1]
UpperCamelCase : List[Any] = image_from_tuple[0, -3:, -3:, -1]
print(f'''image.shape {image.shape}''' )
assert image.shape == (1, 64, 64, 3)
UpperCamelCase : List[str] = np.array(
[0.8326919, 0.73790467, 0.20918581, 0.9309612, 0.5511791, 0.43713328, 0.5513321, 0.49922934, 0.59497786] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE__ ( self ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase : int = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/kandinsky/kandinsky_inpaint_cat_with_hat_fp16.npy" )
UpperCamelCase : Optional[Any] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png" )
UpperCamelCase : Dict = np.ones((7_68, 7_68) , dtype=np.floataa )
UpperCamelCase : str = 0
UpperCamelCase : Dict = "a hat"
UpperCamelCase : Union[str, Any] = KandinskyPriorPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-prior" , torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase )
UpperCamelCase : Optional[Any] = KandinskyInpaintPipeline.from_pretrained(
"kandinsky-community/kandinsky-2-1-inpaint" , torch_dtype=torch.floataa )
UpperCamelCase : Union[str, Any] = pipeline.to(lowerCamelCase )
pipeline.set_progress_bar_config(disable=lowerCamelCase )
UpperCamelCase : Tuple = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCamelCase , UpperCamelCase : Any = pipe_prior(
lowerCamelCase , generator=lowerCamelCase , num_inference_steps=5 , negative_prompt="" , ).to_tuple()
UpperCamelCase : Tuple = pipeline(
lowerCamelCase , image=lowerCamelCase , mask_image=lowerCamelCase , image_embeds=lowerCamelCase , negative_image_embeds=lowerCamelCase , generator=lowerCamelCase , num_inference_steps=1_00 , height=7_68 , width=7_68 , output_type="np" , )
UpperCamelCase : Any = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(lowerCamelCase , lowerCamelCase )
| 435 | 1 |
"""simple docstring"""
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowerCAmelCase_ () -> List[str]:
a_ : List[Any] = argparse.ArgumentParser()
parser.add_argument("--model_ckpt" , type=_SCREAMING_SNAKE_CASE , default="microsoft/unixcoder-base-nine" )
parser.add_argument("--num_epochs" , type=_SCREAMING_SNAKE_CASE , default=5 )
parser.add_argument("--batch_size" , type=_SCREAMING_SNAKE_CASE , default=6 )
parser.add_argument("--gradient_accumulation_steps" , type=_SCREAMING_SNAKE_CASE , default=1 )
parser.add_argument("--freeze" , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE )
parser.add_argument("--learning_rate" , type=_SCREAMING_SNAKE_CASE , default=5E-4 )
parser.add_argument("--seed" , type=_SCREAMING_SNAKE_CASE , default=0 )
parser.add_argument("--lr_scheduler_type" , type=_SCREAMING_SNAKE_CASE , default="cosine" )
parser.add_argument("--num_warmup_steps" , type=_SCREAMING_SNAKE_CASE , default=10 )
parser.add_argument("--weight_decay" , type=_SCREAMING_SNAKE_CASE , default=0.01 )
parser.add_argument("--output_dir" , type=_SCREAMING_SNAKE_CASE , default="./results" )
return parser.parse_args()
UpperCamelCase = load('accuracy')
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :str ) -> Optional[int]:
a_ , a_ : Tuple = eval_pred
a_ : int = np.argmax(_SCREAMING_SNAKE_CASE , axis=1 )
return metric.compute(predictions=_SCREAMING_SNAKE_CASE , references=_SCREAMING_SNAKE_CASE )
class UpperCAmelCase__ ( __lowerCamelCase ):
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE ) -> None:
super().__init__()
a_ : Optional[Any] = trainer
def A ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) -> Optional[int]:
if control.should_evaluate:
a_ : int = deepcopy(_SCREAMING_SNAKE_CASE )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix="train" )
return control_copy
def lowerCAmelCase_ () -> List[str]:
a_ : int = get_args()
set_seed(args.seed )
a_ : List[Any] = load_dataset("codeparrot/codecomplex" , split="train" )
a_ : str = dataset.train_test_split(test_size=0.2 )
a_ : Any = train_test["test"].train_test_split(test_size=0.5 )
a_ : List[str] = DatasetDict(
{
"train": train_test["train"],
"test": test_validation["train"],
"valid": test_validation["test"],
} )
print("Loading tokenizer and model" )
a_ : Dict = AutoTokenizer.from_pretrained(args.model_ckpt )
a_ : Optional[Any] = tokenizer.eos_token
a_ : List[Any] = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
a_ : Optional[int] = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
a_ : Optional[Any] = False
a_ : Optional[int] = ClassLabel(num_classes=7 , names=list(set(train_test_validation["train"]["complexity"] ) ) )
def tokenize(_SCREAMING_SNAKE_CASE :str ):
a_ : List[Any] = tokenizer(example["src"] , truncation=_SCREAMING_SNAKE_CASE , max_length=1024 )
a_ : List[Any] = labels.straint(example["complexity"] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
a_ : Any = train_test_validation.map(
_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE , remove_columns=train_test_validation["train"].column_names , )
a_ : Tuple = DataCollatorWithPadding(tokenizer=_SCREAMING_SNAKE_CASE )
a_ : Union[str, Any] = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy="epoch" , save_strategy="epoch" , logging_strategy="epoch" , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model="accuracy" , run_name="complexity-java" , report_to="wandb" , )
a_ : Optional[Any] = Trainer(
model=_SCREAMING_SNAKE_CASE , args=_SCREAMING_SNAKE_CASE , train_dataset=tokenized_datasets["train"] , eval_dataset=tokenized_datasets["valid"] , tokenizer=_SCREAMING_SNAKE_CASE , data_collator=_SCREAMING_SNAKE_CASE , compute_metrics=_SCREAMING_SNAKE_CASE , )
print("Training..." )
trainer.add_callback(CustomCallback(_SCREAMING_SNAKE_CASE ) )
trainer.train()
if __name__ == "__main__":
main()
| 473 | """simple docstring"""
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
UpperCamelCase = typing.Union[Iterable[float], Iterable[int], np.ndarray] # noqa: UP007
UpperCamelCase = typing.Union[np.floataa, int, float] # noqa: UP007
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Vector , _SCREAMING_SNAKE_CASE :Vector ) -> VectorOut:
return np.sqrt(np.sum((np.asarray(_SCREAMING_SNAKE_CASE ) - np.asarray(_SCREAMING_SNAKE_CASE )) ** 2 ) )
def lowerCAmelCase_ (_SCREAMING_SNAKE_CASE :Vector , _SCREAMING_SNAKE_CASE :Vector ) -> VectorOut:
return sum((va - va) ** 2 for va, va in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ) ** (1 / 2)
if __name__ == "__main__":
def lowerCAmelCase_ () -> None:
from timeit import timeit
print("Without Numpy" )
print(
timeit(
"euclidean_distance_no_np([1, 2, 3], [4, 5, 6])" , number=10000 , globals=globals() , ) )
print("With Numpy" )
print(
timeit(
"euclidean_distance([1, 2, 3], [4, 5, 6])" , number=10000 , globals=globals() , ) )
benchmark()
| 473 | 1 |
import os
from datetime import datetime as dt
from github import Github
SCREAMING_SNAKE_CASE : Tuple = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""enhancement""",
"""new pipeline/model""",
"""new scheduler""",
"""wip""",
]
def __A ( ):
"""simple docstring"""
__a = Github(os.environ["GITHUB_TOKEN"] )
__a = g.get_repo("huggingface/diffusers" )
__a = repo.get_issues(state="open" )
for issue in open_issues:
__a = sorted(issue.get_comments() , key=lambda _A : i.created_at , reverse=_A )
__a = comments[0] if len(_A ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state="closed" )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state="open" )
issue.remove_from_labels("stale" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
issue.add_to_labels("stale" )
if __name__ == "__main__":
main()
| 525 | from datetime import datetime as dt
import os
from github import Github
SCREAMING_SNAKE_CASE : Optional[Any] = [
"""good first issue""",
"""good second issue""",
"""good difficult issue""",
"""feature request""",
"""new model""",
"""wip""",
]
def __A ( ):
"""simple docstring"""
__a = Github(os.environ["GITHUB_TOKEN"] )
__a = g.get_repo("huggingface/transformers" )
__a = repo.get_issues(state="open" )
for issue in open_issues:
__a = sorted([comment for comment in issue.get_comments()] , key=lambda _A : i.created_at , reverse=_A )
__a = comments[0] if len(_A ) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state="closed" )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
"This issue has been automatically marked as stale because it has not had "
"recent activity. If you think this still needs to be addressed "
"please comment on this thread.\n\nPlease note that issues that do not follow the "
"[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
"are likely to be ignored." )
if __name__ == "__main__":
main()
| 525 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
__snake_case : Tuple = """marian"""
__snake_case : Any = ["""past_key_values"""]
__snake_case : Optional[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :List[Any] , __lowercase :Any=5_8101 , __lowercase :Tuple=None , __lowercase :Union[str, Any]=1024 , __lowercase :Dict=12 , __lowercase :int=4096 , __lowercase :int=16 , __lowercase :List[Any]=12 , __lowercase :Dict=4096 , __lowercase :Dict=16 , __lowercase :Tuple=0.0 , __lowercase :Tuple=0.0 , __lowercase :List[Any]=True , __lowercase :int=True , __lowercase :Tuple="gelu" , __lowercase :str=1024 , __lowercase :Optional[int]=0.1 , __lowercase :List[str]=0.0 , __lowercase :Union[str, Any]=0.0 , __lowercase :Dict=0.02 , __lowercase :Tuple=5_8100 , __lowercase :Optional[Any]=False , __lowercase :int=5_8100 , __lowercase :Any=0 , __lowercase :str=0 , __lowercase :str=True , **__lowercase :Any , ):
__lowerCamelCase : List[Any] =vocab_size
__lowerCamelCase : Optional[int] =decoder_vocab_size or vocab_size
__lowerCamelCase : Tuple =max_position_embeddings
__lowerCamelCase : List[Any] =d_model
__lowerCamelCase : Any =encoder_ffn_dim
__lowerCamelCase : str =encoder_layers
__lowerCamelCase : List[str] =encoder_attention_heads
__lowerCamelCase : str =decoder_ffn_dim
__lowerCamelCase : Tuple =decoder_layers
__lowerCamelCase : Any =decoder_attention_heads
__lowerCamelCase : List[Any] =dropout
__lowerCamelCase : Any =attention_dropout
__lowerCamelCase : Union[str, Any] =activation_dropout
__lowerCamelCase : Optional[int] =activation_function
__lowerCamelCase : Dict =init_std
__lowerCamelCase : List[Any] =encoder_layerdrop
__lowerCamelCase : Optional[Any] =decoder_layerdrop
__lowerCamelCase : Any =use_cache
__lowerCamelCase : Any =encoder_layers
__lowerCamelCase : Optional[int] =scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCamelCase : int =share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__lowercase , eos_token_id=__lowercase , is_encoder_decoder=__lowercase , decoder_start_token_id=__lowercase , forced_eos_token_id=__lowercase , **__lowercase , )
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __lowercase ( self :int ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase : Tuple =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowerCamelCase : Optional[Any] ={0: '''batch'''}
__lowerCamelCase : Any ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__lowerCamelCase : Optional[Any] ={0: '''batch''', 1: '''decoder_sequence'''}
__lowerCamelCase : List[Any] ={0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__lowercase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowerCamelCase : Any =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowerCamelCase , __lowerCamelCase : str =self.num_layers
for i in range(__lowercase ):
__lowerCamelCase : Optional[int] ={0: '''batch''', 2: '''past_sequence + sequence'''}
__lowerCamelCase : List[str] ={0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__lowerCamelCase : List[str] =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __lowercase ( self :Optional[int] ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase : Tuple =super().outputs
else:
__lowerCamelCase : List[str] =super(__lowercase , self ).outputs
if self.use_past:
__lowerCamelCase , __lowerCamelCase : int =self.num_layers
for i in range(__lowercase ):
__lowerCamelCase : Any ={0: '''batch''', 2: '''past_sequence + sequence'''}
__lowerCamelCase : Optional[int] ={0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __lowercase ( self :str , __lowercase :PreTrainedTokenizer , __lowercase :int = -1 , __lowercase :int = -1 , __lowercase :bool = False , __lowercase :Optional[TensorType] = None , ):
__lowerCamelCase : List[str] =self._generate_dummy_inputs_for_encoder_and_decoder(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
# Generate decoder inputs
__lowerCamelCase : Optional[Any] =seq_length if not self.use_past else 1
__lowerCamelCase : List[Any] =self._generate_dummy_inputs_for_encoder_and_decoder(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
__lowerCamelCase : Dict ={f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__lowerCamelCase : str =dict(**__lowercase , **__lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowerCamelCase , __lowerCamelCase : int =common_inputs['''input_ids'''].shape
__lowerCamelCase : Optional[Any] =common_inputs['''decoder_input_ids'''].shape[1]
__lowerCamelCase , __lowerCamelCase : Optional[int] =self.num_attention_heads
__lowerCamelCase : Any =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCamelCase : Dict =decoder_seq_length + 3
__lowerCamelCase : Tuple =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowerCamelCase : Optional[int] =torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__lowercase , __lowercase )] , dim=1 )
__lowerCamelCase : Any =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowerCamelCase , __lowerCamelCase : str =self.num_layers
__lowerCamelCase : List[Any] =min(__lowercase , __lowercase )
__lowerCamelCase : int =max(__lowercase , __lowercase ) - min_num_layers
__lowerCamelCase : Any ='''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__lowercase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowercase ),
torch.zeros(__lowercase ),
torch.zeros(__lowercase ),
torch.zeros(__lowercase ),
) )
# TODO: test this.
__lowerCamelCase : Dict =encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__lowercase , __lowercase ):
common_inputs["past_key_values"].append((torch.zeros(__lowercase ), torch.zeros(__lowercase )) )
return common_inputs
def __lowercase ( self :Dict , __lowercase :PreTrainedTokenizer , __lowercase :int = -1 , __lowercase :int = -1 , __lowercase :bool = False , __lowercase :Optional[TensorType] = None , ):
__lowerCamelCase : List[str] =self._generate_dummy_inputs_for_encoder_and_decoder(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowerCamelCase , __lowerCamelCase : int =common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__lowerCamelCase : str =seqlen + 2
__lowerCamelCase , __lowerCamelCase : List[str] =self.num_layers
__lowerCamelCase , __lowerCamelCase : Optional[Any] =self.num_attention_heads
__lowerCamelCase : List[str] =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCamelCase : Any =common_inputs['''attention_mask'''].dtype
__lowerCamelCase : Optional[int] =torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__lowercase , __lowercase , dtype=__lowercase )] , dim=1 )
__lowerCamelCase : List[str] =[
(torch.zeros(__lowercase ), torch.zeros(__lowercase )) for _ in range(__lowercase )
]
return common_inputs
def __lowercase ( self :int , __lowercase :PreTrainedTokenizer , __lowercase :int = -1 , __lowercase :int = -1 , __lowercase :bool = False , __lowercase :Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowerCamelCase : Union[str, Any] =compute_effective_axis_dimension(
__lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowerCamelCase : Optional[int] =tokenizer.num_special_tokens_to_add(__lowercase )
__lowerCamelCase : Optional[int] =compute_effective_axis_dimension(
__lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowercase )
# Generate dummy inputs according to compute batch and sequence
__lowerCamelCase : Any =[''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowerCamelCase : Union[str, Any] =dict(tokenizer(__lowercase , return_tensors=__lowercase ) )
return common_inputs
def __lowercase ( self :Tuple , __lowercase :PreTrainedTokenizer , __lowercase :int = -1 , __lowercase :int = -1 , __lowercase :bool = False , __lowercase :Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase : int =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowercase , batch_size=__lowercase , seq_length=__lowercase , is_pair=__lowercase , framework=__lowercase )
else:
__lowerCamelCase : List[Any] =self._generate_dummy_inputs_for_causal_lm(
__lowercase , batch_size=__lowercase , seq_length=__lowercase , is_pair=__lowercase , framework=__lowercase )
return common_inputs
def __lowercase ( self :Optional[int] , __lowercase :Tuple , __lowercase :Tuple , __lowercase :Dict , __lowercase :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase : str =super()._flatten_past_key_values_(__lowercase , __lowercase , __lowercase , __lowercase )
else:
__lowerCamelCase : Optional[Any] =super(__lowercase , self )._flatten_past_key_values_(
__lowercase , __lowercase , __lowercase , __lowercase )
@property
def __lowercase ( self :List[str] ):
return 1e-4
| 179 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
_UpperCamelCase = logging.get_logger(__name__)
logging.set_verbosity_info()
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : str ):
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
__lowerCamelCase : Optional[Any] =XLMProphetNetForConditionalGenerationOld.from_pretrained(SCREAMING_SNAKE_CASE )
__lowerCamelCase , __lowerCamelCase : List[Any] =XLMProphetNetForConditionalGeneration.from_pretrained(
SCREAMING_SNAKE_CASE , output_loading_info=SCREAMING_SNAKE_CASE )
else:
__lowerCamelCase : int =ProphetNetForConditionalGenerationOld.from_pretrained(SCREAMING_SNAKE_CASE )
__lowerCamelCase , __lowerCamelCase : int =ProphetNetForConditionalGeneration.from_pretrained(
SCREAMING_SNAKE_CASE , output_loading_info=SCREAMING_SNAKE_CASE )
__lowerCamelCase : Optional[Any] =['''key_proj''', '''value_proj''', '''query_proj''']
__lowerCamelCase : Tuple ={
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
__lowerCamelCase : int =key.split('''.''' )
if attributes[0] == "lm_head":
__lowerCamelCase : int =prophet
__lowerCamelCase : Optional[int] =prophet_old
else:
__lowerCamelCase : Any =prophet.prophetnet
__lowerCamelCase : Union[str, Any] =prophet_old.model
__lowerCamelCase : Optional[Any] =False
for attribute in attributes:
if attribute in mapping:
__lowerCamelCase : Optional[Any] =mapping[attribute]
if not hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and len(SCREAMING_SNAKE_CASE ) > 0:
__lowerCamelCase : Any =attribute
elif hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
__lowerCamelCase : Any =attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
__lowerCamelCase : str =old_model.weight
logger.info(F'{attribute} is initialized.' )
__lowerCamelCase : Any =True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
__lowerCamelCase : Union[str, Any] =old_model.bias
logger.info(F'{attribute} is initialized' )
__lowerCamelCase : str =True
break
elif attribute in special_keys and hasattr(SCREAMING_SNAKE_CASE , '''in_proj_weight''' ):
__lowerCamelCase : int =old_model.in_proj_weight.shape[0] // 3
__lowerCamelCase : Union[str, Any] =getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
__lowerCamelCase : List[str] =nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
__lowerCamelCase : str =nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
__lowerCamelCase : List[str] =nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
__lowerCamelCase : Tuple =nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
__lowerCamelCase : Optional[Any] =nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
__lowerCamelCase : int =nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
__lowerCamelCase : Dict =True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
__lowerCamelCase : str =nn.Parameter(old_model.embed_positions.weight[:512, :] )
__lowerCamelCase : Dict =True
break
if attribute.isdigit():
__lowerCamelCase : List[str] =model[int(SCREAMING_SNAKE_CASE )]
__lowerCamelCase : Optional[Any] =old_model[int(SCREAMING_SNAKE_CASE )]
else:
__lowerCamelCase : int =getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if old_attribute == "":
__lowerCamelCase : Dict =old_model
else:
if not hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ):
raise ValueError(F'{old_model} does not have {old_attribute}' )
__lowerCamelCase : Tuple =getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
if not is_key_init:
raise ValueError(F'{key} was not correctly initialized!' )
print(F'Saving model to {pytorch_dump_folder_path}' )
prophet.save_pretrained(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
_UpperCamelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_UpperCamelCase = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 179 | 1 |
import random
import unittest
from torch.utils.data import BatchSampler, DataLoader, IterableDataset
from accelerate import Accelerator
from accelerate.data_loader import (
BatchSamplerShard,
DataLoaderDispatcher,
DataLoaderShard,
IterableDatasetShard,
SkipBatchSampler,
SkipDataLoader,
skip_first_batches,
)
class __snake_case ( __lowerCamelCase ):
'''simple docstring'''
def __init__( self : List[Any] , A : Any=0.01 , A : List[Any]=1_000 ):
__snake_case: str = p_stop
__snake_case: Dict = max_length
def __iter__( self : Union[str, Any] ):
__snake_case: List[str] = 0
__snake_case: List[Any] = False
while not stop and count < self.max_length:
yield count
count += 1
__snake_case: Tuple = random.random() < self.p_stop
class __snake_case ( unittest.TestCase ):
'''simple docstring'''
def UpperCAmelCase__ ( self : Tuple , A : int , A : Dict , A : Optional[int]=False , A : Union[str, Any]=True ):
__snake_case: List[str] = [
BatchSamplerShard(A , 2 , A , split_batches=A , even_batches=A )
for i in range(2 )
]
__snake_case: List[Any] = [list(A ) for batch_sampler_shard in batch_sampler_shards]
if not split_batches:
self.assertListEqual([len(A ) for shard in batch_sampler_shards] , [len(A ) for e in expected] )
self.assertListEqual(A , A )
def UpperCAmelCase__ ( self : Optional[int] ):
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case: List[str] = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
__snake_case: Union[str, Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A , A )
__snake_case: Optional[Any] = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case: Union[str, Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
__snake_case: int = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [0, 1, 2]],
]
self.check_batch_sampler_shards(A , A )
__snake_case: Union[str, Any] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
__snake_case: Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case: Dict = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
__snake_case: Dict = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 0, 1]],
]
self.check_batch_sampler_shards(A , A )
__snake_case: Tuple = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
__snake_case: List[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__snake_case: Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
__snake_case: str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 0]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [1, 2, 3]],
]
self.check_batch_sampler_shards(A , A )
__snake_case: Union[str, Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
__snake_case: str = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A )
# Check the shards when the dataset is very small.
__snake_case: Optional[int] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
__snake_case: List[Any] = [[[0, 1, 0]], [[1, 0, 1]]]
self.check_batch_sampler_shards(A , A )
__snake_case: List[Any] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
__snake_case: Dict = [[], []]
self.check_batch_sampler_shards(A , A )
def UpperCAmelCase__ ( self : Optional[int] ):
# Check the shards when the dataset is a round multiple of batch size.
__snake_case: Dict = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
__snake_case: Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
__snake_case: Union[str, Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case: Optional[Any] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
__snake_case: Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [0, 1]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
__snake_case: int = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
__snake_case: List[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case: Dict = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
__snake_case: Any = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 0]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [1, 2]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
__snake_case: Optional[int] = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
__snake_case: Dict = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A )
# Check the shards when the dataset is very small.
__snake_case: List[Any] = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
__snake_case: Dict = [[[0, 1]], [[0, 1]]]
self.check_batch_sampler_shards(A , A , split_batches=A )
__snake_case: str = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
__snake_case: Optional[Any] = [[], []]
self.check_batch_sampler_shards(A , A , split_batches=A )
def UpperCAmelCase__ ( self : List[str] ):
# Check the shards when the dataset is a round multiple of total batch size.
__snake_case: int = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
__snake_case: Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21, 22, 23]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
__snake_case: str = BatchSampler(range(24 ) , batch_size=3 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is a round multiple of batch size but not total batch size.
__snake_case: List[str] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
__snake_case: List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
__snake_case: List[str] = BatchSampler(range(21 ) , batch_size=3 , drop_last=A )
__snake_case: Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size but has a multiple of
# num_processes batch.
__snake_case: Any = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
__snake_case: Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19, 20]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17], [21]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
__snake_case: List[str] = BatchSampler(range(22 ) , batch_size=3 , drop_last=A )
__snake_case: List[str] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size but and has not a multiple of
# num_processes batch.
__snake_case: Optional[Any] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
__snake_case: Tuple = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14], [18, 19]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
__snake_case: List[str] = BatchSampler(range(20 ) , batch_size=3 , drop_last=A )
__snake_case: Optional[Any] = [
[[0, 1, 2], [6, 7, 8], [12, 13, 14]],
[[3, 4, 5], [9, 10, 11], [15, 16, 17]],
]
self.check_batch_sampler_shards(A , A , even_batches=A )
# Check the shards when the dataset is very small.
__snake_case: str = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
__snake_case: str = [[[0, 1]], []]
self.check_batch_sampler_shards(A , A , even_batches=A )
__snake_case: List[str] = BatchSampler(range(2 ) , batch_size=3 , drop_last=A )
__snake_case: str = [[], []]
self.check_batch_sampler_shards(A , A , even_batches=A )
def UpperCAmelCase__ ( self : Union[str, Any] ):
# Check the shards when the dataset is a round multiple of batch size.
__snake_case: Optional[int] = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
__snake_case: int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19], [22, 23]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
__snake_case: Optional[Any] = BatchSampler(range(24 ) , batch_size=4 , drop_last=A )
# Expected shouldn't change
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size.
__snake_case: int = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
__snake_case: Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20, 21]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
__snake_case: Optional[int] = BatchSampler(range(22 ) , batch_size=4 , drop_last=A )
__snake_case: int = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is not a round multiple of batch size or num_processes.
__snake_case: Optional[Any] = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
__snake_case: Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17], [20]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
__snake_case: Tuple = BatchSampler(range(21 ) , batch_size=4 , drop_last=A )
__snake_case: Optional[Any] = [
[[0, 1], [4, 5], [8, 9], [12, 13], [16, 17]],
[[2, 3], [6, 7], [10, 11], [14, 15], [18, 19]],
]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
# Check the shards when the dataset is very small.
__snake_case: Tuple = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
__snake_case: Any = [[[0, 1]], []]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
__snake_case: Any = BatchSampler(range(2 ) , batch_size=4 , drop_last=A )
__snake_case: Optional[int] = [[], []]
self.check_batch_sampler_shards(A , A , split_batches=A , even_batches=A )
def UpperCAmelCase__ ( self : Tuple ):
__snake_case: Any = [[0, 1, 2], [3, 4], [5, 6, 7, 8], [9, 10, 11], [12, 13]]
__snake_case: Optional[int] = [BatchSamplerShard(A , 2 , A , even_batches=A ) for i in range(2 )]
self.assertEqual(len(batch_sampler_shards[0] ) , 3 )
self.assertEqual(len(batch_sampler_shards[1] ) , 2 )
self.assertListEqual(list(batch_sampler_shards[0] ) , [[0, 1, 2], [5, 6, 7, 8], [12, 13]] )
self.assertListEqual(list(batch_sampler_shards[1] ) , [[3, 4], [9, 10, 11]] )
def UpperCAmelCase__ ( self : str , A : Union[str, Any] , A : int , A : Optional[Any] , A : Optional[Any]=False , A : Dict=2 , A : Optional[int]=False ):
random.seed(A )
__snake_case: Optional[int] = list(A )
__snake_case: int = [
IterableDatasetShard(
A , batch_size=A , drop_last=A , num_processes=A , process_index=A , split_batches=A , )
for i in range(A )
]
__snake_case: int = []
for iterable_dataset_shard in iterable_dataset_shards:
# Since our random iterable dataset will be... random... we need to use a seed to get reproducible results.
random.seed(A )
iterable_dataset_lists.append(list(A ) )
__snake_case: Optional[int] = batch_size // num_processes if split_batches else batch_size
# All iterable dataset shard should have the same length, a round multiple of shard_batch_size
__snake_case: List[Any] = iterable_dataset_lists[0]
for l in iterable_dataset_lists[1:]:
self.assertEqual(len(A ) , len(A ) )
self.assertTrue(len(A ) % shard_batch_size == 0 )
__snake_case: List[str] = []
for idx in range(0 , len(A ) , A ):
for l in iterable_dataset_lists:
observed += l[idx : idx + shard_batch_size]
if not drop_last:
while len(A ) < len(A ):
reference += reference
self.assertListEqual(A , reference[: len(A )] )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: int = 42
__snake_case: List[Any] = RandomIterableDataset()
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
# Edge case with a very small dataset
__snake_case: List[Any] = RandomIterableDataset(max_length=2 )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
self.check_iterable_dataset_shards(A , A , batch_size=4 , drop_last=A , split_batches=A )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Optional[Any] = BatchSampler(range(16 ) , batch_size=4 , drop_last=A )
__snake_case: str = SkipBatchSampler(A , 2 )
self.assertListEqual(list(A ) , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def UpperCAmelCase__ ( self : Dict ):
__snake_case: Tuple = SkipDataLoader(list(range(16 ) ) , batch_size=4 , skip_batches=2 )
self.assertListEqual([t.tolist() for t in dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def UpperCAmelCase__ ( self : str ):
__snake_case: int = DataLoader(list(range(16 ) ) , batch_size=4 )
__snake_case: Any = skip_first_batches(A , num_batches=2 )
self.assertListEqual([t.tolist() for t in new_dataloader] , [[8, 9, 10, 11], [12, 13, 14, 15]] )
def UpperCAmelCase__ ( self : Optional[int] ):
__snake_case: Dict = DataLoaderShard(list(range(16 ) ) , batch_size=4 )
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
def UpperCAmelCase__ ( self : Optional[int] ):
Accelerator()
__snake_case: Optional[Any] = DataLoaderDispatcher(range(16 ) , batch_size=4 )
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
# Test it also works on the second iteration
for idx, _ in enumerate(A ):
self.assertEqual(dataloader.end_of_dataloader , idx == 3 )
| 155 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__UpperCAmelCase : Optional[Any] = {
"configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : List[str] = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 155 | 1 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
__A = random.Random()
def SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase , __UpperCAmelCase=1.0 , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Optional[int]:
if rng is None:
lowercase__: List[Any] = global_rng
lowercase__: List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class UpperCAmelCase (unittest.TestCase ):
"""simple docstring"""
def __init__( self , _UpperCAmelCase , _UpperCAmelCase=7 , _UpperCAmelCase=400 , _UpperCAmelCase=2000 , _UpperCAmelCase=2048 , _UpperCAmelCase=128 , _UpperCAmelCase=1 , _UpperCAmelCase=512 , _UpperCAmelCase=30 , _UpperCAmelCase=44100 , ):
lowercase__: Any = parent
lowercase__: List[Any] = batch_size
lowercase__: Dict = min_seq_length
lowercase__: Union[str, Any] = max_seq_length
lowercase__: int = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowercase__: Dict = spectrogram_length
lowercase__: Any = feature_size
lowercase__: str = num_audio_channels
lowercase__: str = hop_length
lowercase__: str = chunk_length
lowercase__: Tuple = sampling_rate
def _snake_case ( self ):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def _snake_case ( self , _UpperCAmelCase=False , _UpperCAmelCase=False ):
def _flatten(_UpperCAmelCase ):
return list(itertools.chain(*_UpperCAmelCase ) )
if equal_length:
lowercase__: Optional[Any] = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
lowercase__: int = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowercase__: str = [np.asarray(_UpperCAmelCase ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class UpperCAmelCase (_UpperCAmelCase ,unittest.TestCase ):
"""simple docstring"""
_UpperCAmelCase :str = TvltFeatureExtractor
def _snake_case ( self ):
lowercase__: Tuple = TvltFeatureExtractionTester(self )
def _snake_case ( self ):
lowercase__: Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(_UpperCAmelCase , '''spectrogram_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''feature_size''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''num_audio_channels''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''hop_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''chunk_length''' ) )
self.assertTrue(hasattr(_UpperCAmelCase , '''sampling_rate''' ) )
def _snake_case ( self ):
lowercase__: str = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__: Dict = feat_extract_first.save_pretrained(_UpperCAmelCase )[0]
check_json_file_has_correct_format(_UpperCAmelCase )
lowercase__: Tuple = self.feature_extraction_class.from_pretrained(_UpperCAmelCase )
lowercase__: List[str] = feat_extract_first.to_dict()
lowercase__: Dict = feat_extract_second.to_dict()
lowercase__: Optional[Any] = dict_first.pop('''mel_filters''' )
lowercase__: List[Any] = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
lowercase__: List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
lowercase__: Tuple = os.path.join(_UpperCAmelCase , '''feat_extract.json''' )
feat_extract_first.to_json_file(_UpperCAmelCase )
lowercase__: Optional[int] = self.feature_extraction_class.from_json_file(_UpperCAmelCase )
lowercase__: Optional[Any] = feat_extract_first.to_dict()
lowercase__: List[str] = feat_extract_second.to_dict()
lowercase__: Dict = dict_first.pop('''mel_filters''' )
lowercase__: Dict = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(_UpperCAmelCase , _UpperCAmelCase ) )
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase )
def _snake_case ( self ):
# Initialize feature_extractor
lowercase__: int = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
lowercase__: int = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
lowercase__: Dict = [np.asarray(_UpperCAmelCase ) for speech_input in speech_inputs]
# Test not batched input
lowercase__: Optional[int] = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
lowercase__: Any = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
lowercase__: Union[str, Any] = feature_extractor(
_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 , mask_audio=_UpperCAmelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
lowercase__: List[str] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowercase__: Optional[Any] = np.asarray(_UpperCAmelCase )
lowercase__: List[Any] = feature_extractor(_UpperCAmelCase , return_tensors='''np''' , sampling_rate=44100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _snake_case ( self , _UpperCAmelCase ):
lowercase__: Tuple = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
lowercase__: Dict = ds.sort('''id''' ).select(range(_UpperCAmelCase ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def _snake_case ( self ):
lowercase__: Optional[int] = self._load_datasamples(1 )
lowercase__: int = TvltFeatureExtractor()
lowercase__: Any = feature_extractor(_UpperCAmelCase , return_tensors='''pt''' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 192, 128) )
lowercase__: List[Any] = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , _UpperCAmelCase , atol=1e-4 ) )
| 586 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
"facebook/timesformer": "https://huggingface.co/facebook/timesformer/resolve/main/config.json",
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Tuple = "timesformer"
def __init__( self , _UpperCAmelCase=224 , _UpperCAmelCase=16 , _UpperCAmelCase=3 , _UpperCAmelCase=8 , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3072 , _UpperCAmelCase="gelu" , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1e-6 , _UpperCAmelCase=True , _UpperCAmelCase="divided_space_time" , _UpperCAmelCase=0 , **_UpperCAmelCase , ):
super().__init__(**_UpperCAmelCase )
lowercase__: Optional[int] = image_size
lowercase__: Optional[Any] = patch_size
lowercase__: Dict = num_channels
lowercase__: Tuple = num_frames
lowercase__: Any = hidden_size
lowercase__: Optional[int] = num_hidden_layers
lowercase__: int = num_attention_heads
lowercase__: Optional[int] = intermediate_size
lowercase__: Optional[int] = hidden_act
lowercase__: int = hidden_dropout_prob
lowercase__: Tuple = attention_probs_dropout_prob
lowercase__: Union[str, Any] = initializer_range
lowercase__: List[Any] = layer_norm_eps
lowercase__: str = qkv_bias
lowercase__: Tuple = attention_type
lowercase__: Tuple = drop_path_rate
| 586 | 1 |
'''simple docstring'''
def lowercase__ ( __lowercase : List[str] ) -> list[int]:
"""simple docstring"""
if length <= 0 or not isinstance(a__ , a__ ):
raise ValueError('Length must be a positive integer.' )
return [n * (2 * n - 1) for n in range(a__ )]
if __name__ == "__main__":
print(hexagonal_numbers(length=5))
print(hexagonal_numbers(length=10)) | 716 |
'''simple docstring'''
from __future__ import annotations
from math import gcd
def lowercase__ ( __lowercase : int , __lowercase : int = 2 , __lowercase : int = 1 , __lowercase : int = 3 , ) -> int | None:
"""simple docstring"""
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
def rand_fn(__lowercase : int , __lowercase : int , __lowercase : int ) -> int:
return (pow(__lowercase , 2 ) + step) % modulus
for _ in range(__lowercase ):
# These track the position within the cycle detection logic.
__UpperCamelCase = seed
__UpperCamelCase = seed
while True:
# At each iteration, the tortoise moves one step and the hare moves two.
__UpperCamelCase = rand_fn(__lowercase , __lowercase , __lowercase )
__UpperCamelCase = rand_fn(__lowercase , __lowercase , __lowercase )
__UpperCamelCase = rand_fn(__lowercase , __lowercase , __lowercase )
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
__UpperCamelCase = gcd(hare - tortoise , __lowercase )
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
__UpperCamelCase = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
a__ : Optional[Any] =argparse.ArgumentParser()
parser.add_argument(
'''num''',
type=int,
help='''The value to find a divisor of''',
)
parser.add_argument(
'''--attempts''',
type=int,
default=3,
help='''The number of attempts before giving up''',
)
a__ : List[Any] =parser.parse_args()
a__ : str =pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(f'{args.num} is probably prime')
else:
a__ : Any =args.num // divisor
print(f'{args.num} = {divisor} * {quotient}')
| 434 | 0 |
import gc
import unittest
from diffusers import FlaxControlNetModel, FlaxStableDiffusionControlNetPipeline
from diffusers.utils import is_flax_available, load_image, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class __lowerCamelCase ( unittest.TestCase ):
"""simple docstring"""
def a ( self : Union[str, Any] ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def a ( self : str ) -> List[str]:
lowerCAmelCase__ , lowerCAmelCase__ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-canny" , from_pt=SCREAMING_SNAKE_CASE__ , dtype=jnp.bfloataa )
lowerCAmelCase__ , lowerCAmelCase__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=SCREAMING_SNAKE_CASE__ , from_pt=SCREAMING_SNAKE_CASE__ , dtype=jnp.bfloataa )
lowerCAmelCase__ = controlnet_params
lowerCAmelCase__ = "bird"
lowerCAmelCase__ = jax.device_count()
lowerCAmelCase__ = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCAmelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png" )
lowerCAmelCase__ = pipe.prepare_image_inputs([canny_image] * num_samples )
lowerCAmelCase__ = jax.random.PRNGKey(0 )
lowerCAmelCase__ = jax.random.split(SCREAMING_SNAKE_CASE__ , jax.device_count() )
lowerCAmelCase__ = replicate(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = shard(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = shard(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = pipe(
prompt_ids=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , params=SCREAMING_SNAKE_CASE__ , prng_seed=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , jit=SCREAMING_SNAKE_CASE__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowerCAmelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase__ = images[0, 253:256, 253:256, -1]
lowerCAmelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase__ = jnp.array(
[0.167_969, 0.116_699, 0.081_543, 0.154_297, 0.132_812, 0.108_887, 0.169_922, 0.169_922, 0.205_078] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def a ( self : List[str] ) -> Any:
lowerCAmelCase__ , lowerCAmelCase__ = FlaxControlNetModel.from_pretrained(
"lllyasviel/sd-controlnet-openpose" , from_pt=SCREAMING_SNAKE_CASE__ , dtype=jnp.bfloataa )
lowerCAmelCase__ , lowerCAmelCase__ = FlaxStableDiffusionControlNetPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , controlnet=SCREAMING_SNAKE_CASE__ , from_pt=SCREAMING_SNAKE_CASE__ , dtype=jnp.bfloataa )
lowerCAmelCase__ = controlnet_params
lowerCAmelCase__ = "Chef in the kitchen"
lowerCAmelCase__ = jax.device_count()
lowerCAmelCase__ = pipe.prepare_text_inputs([prompts] * num_samples )
lowerCAmelCase__ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/pose.png" )
lowerCAmelCase__ = pipe.prepare_image_inputs([pose_image] * num_samples )
lowerCAmelCase__ = jax.random.PRNGKey(0 )
lowerCAmelCase__ = jax.random.split(SCREAMING_SNAKE_CASE__ , jax.device_count() )
lowerCAmelCase__ = replicate(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = shard(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = shard(SCREAMING_SNAKE_CASE__ )
lowerCAmelCase__ = pipe(
prompt_ids=SCREAMING_SNAKE_CASE__ , image=SCREAMING_SNAKE_CASE__ , params=SCREAMING_SNAKE_CASE__ , prng_seed=SCREAMING_SNAKE_CASE__ , num_inference_steps=50 , jit=SCREAMING_SNAKE_CASE__ , ).images
assert images.shape == (jax.device_count(), 1, 768, 512, 3)
lowerCAmelCase__ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
lowerCAmelCase__ = images[0, 253:256, 253:256, -1]
lowerCAmelCase__ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
lowerCAmelCase__ = jnp.array(
[[0.271_484, 0.261_719, 0.275_391, 0.277_344, 0.279_297, 0.291_016, 0.294_922, 0.302_734, 0.302_734]] )
print(f'output_slice: {output_slice}' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 61 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
a : List[Any] = '''\
'''
a : Optional[int] = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
a : List[Any] = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
input_texts (list of str): input text, each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
device (str): device to run on, defaults to \'cuda\' when available
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __UpperCamelCase ( datasets.Metric ):
def __a ( self ) -> List[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"input_texts": datasets.Value("string" ),
} ) , reference_urls=["https://huggingface.co/docs/transformers/perplexity"] , )
def __a ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = 16 , lowerCAmelCase__ = True , lowerCAmelCase__=None ) -> Tuple:
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
a : Optional[Any] = "cuda"
else:
a : str = "cuda" if torch.cuda.is_available() else "cpu"
a : str = AutoModelForCausalLM.from_pretrained(lowerCAmelCase__ )
a : int = model.to(lowerCAmelCase__ )
a : str = AutoTokenizer.from_pretrained(lowerCAmelCase__ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
a : str = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(lowerCAmelCase__ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"pad_token": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
a : List[Any] = model.config.max_length - 1
else:
a : Union[str, Any] = model.config.max_length
a : Union[str, Any] = tokenizer(
lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , return_tensors="pt" , return_attention_mask=lowerCAmelCase__ , ).to(lowerCAmelCase__ )
a : str = encodings["input_ids"]
a : Optional[int] = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
a : List[Any] = []
a : List[Any] = CrossEntropyLoss(reduction="none" )
for start_index in logging.tqdm(range(0 , len(lowerCAmelCase__ ) , lowerCAmelCase__ ) ):
a : Optional[Any] = min(start_index + batch_size , len(lowerCAmelCase__ ) )
a : Any = encoded_texts[start_index:end_index]
a : List[Any] = attn_masks[start_index:end_index]
if add_start_token:
a : int = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(lowerCAmelCase__ )
a : str = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
a : str = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(lowerCAmelCase__ ), attn_mask] , dim=1 )
a : Any = encoded_batch
with torch.no_grad():
a : Optional[int] = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ ).logits
a : List[Any] = out_logits[..., :-1, :].contiguous()
a : int = labels[..., 1:].contiguous()
a : Union[str, Any] = attn_mask[..., 1:].contiguous()
a : Dict = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , lowerCAmelCase__ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(lowerCAmelCase__ )}
| 633 | 0 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=99 , SCREAMING_SNAKE_CASE__=13 , SCREAMING_SNAKE_CASE__=7 , SCREAMING_SNAKE_CASE__=9 , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__=5 , SCREAMING_SNAKE_CASE__=4 , SCREAMING_SNAKE_CASE__=37 , SCREAMING_SNAKE_CASE__=8 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.0_0_2 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , ) -> List[Any]:
A__ = parent
A__ = batch_size
A__ = encoder_seq_length
A__ = decoder_seq_length
# For common tests
A__ = self.decoder_seq_length
A__ = is_training
A__ = use_attention_mask
A__ = use_labels
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = d_ff
A__ = relative_attention_num_buckets
A__ = dropout_rate
A__ = initializer_factor
A__ = eos_token_id
A__ = pad_token_id
A__ = decoder_start_token_id
A__ = None
A__ = decoder_layers
def snake_case__ ( self ) -> Union[str, Any]:
return TaConfig.from_pretrained("google/umt5-base" )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , ) -> Tuple:
if attention_mask is None:
A__ = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
A__ = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
A__ = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ )
if decoder_head_mask is None:
A__ = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ )
if cross_attn_head_mask is None:
A__ = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=SCREAMING_SNAKE_CASE__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def snake_case__ ( self ) -> Union[str, Any]:
A__ = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
A__ = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
A__ = input_ids.clamp(self.pad_token_id + 1 )
A__ = decoder_input_ids.clamp(self.pad_token_id + 1 )
A__ = self.get_config()
A__ = config.num_attention_heads
A__ = self.prepare_inputs_dict(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return config, input_dict
def snake_case__ ( self ) -> Union[str, Any]:
A__ , A__ = self.prepare_config_and_inputs()
return config, inputs_dict
def snake_case__ ( self ) -> Union[str, Any]:
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def snake_case__ ( self ) -> Union[str, Any]:
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) -> Tuple:
A__ = UMTaModel(config=SCREAMING_SNAKE_CASE__ )
model.to(SCREAMING_SNAKE_CASE__ )
model.eval()
A__ = model(
input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ , attention_mask=SCREAMING_SNAKE_CASE__ , decoder_attention_mask=SCREAMING_SNAKE_CASE__ , )
A__ = model(input_ids=SCREAMING_SNAKE_CASE__ , decoder_input_ids=SCREAMING_SNAKE_CASE__ )
A__ = result.last_hidden_state
A__ = result.past_key_values
A__ = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) -> Optional[Any]:
A__ = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).get_decoder().to(SCREAMING_SNAKE_CASE__ ).eval()
# first forward pass
A__ = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ )
A__ = model(SCREAMING_SNAKE_CASE__ )
A__ = model(SCREAMING_SNAKE_CASE__ , use_cache=SCREAMING_SNAKE_CASE__ )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) )
self.parent.assertTrue(len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ ) + 1 )
A__ , A__ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
A__ = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
A__ = torch.cat([input_ids, next_tokens] , dim=-1 )
A__ = model(SCREAMING_SNAKE_CASE__ )["last_hidden_state"]
A__ = model(SCREAMING_SNAKE_CASE__ , past_key_values=SCREAMING_SNAKE_CASE__ )["last_hidden_state"]
# select random slice
A__ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
A__ = output_from_no_past[:, -1, random_slice_idx].detach()
A__ = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , atol=1e-3 ) )
def snake_case__ ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , ) -> List[str]:
A__ = UMTaModel(config=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ ).half().eval()
A__ = model(**SCREAMING_SNAKE_CASE__ )["last_hidden_state"]
self.parent.assertFalse(torch.isnan(SCREAMING_SNAKE_CASE__ ).any().item() )
@require_torch
class UpperCamelCase__ ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
"""simple docstring"""
A__ : Optional[int] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
A__ : int = (UMTaForConditionalGeneration,) if is_torch_available() else ()
A__ : str = (
{
"conversational": UMTaForConditionalGeneration,
"feature-extraction": UMTaModel,
"summarization": UMTaForConditionalGeneration,
"text2text-generation": UMTaForConditionalGeneration,
"translation": UMTaForConditionalGeneration,
"question-answering": UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
A__ : List[str] = True
A__ : List[str] = False
A__ : Union[str, Any] = False
A__ : Union[str, Any] = True
A__ : str = True
# The small UMT5 model needs higher percentages for CPU/MP tests
A__ : Optional[Any] = [0.8, 0.9]
def snake_case__ ( self ) -> str:
A__ = UMTaModelTester(self )
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def snake_case__ ( self ) -> Optional[Any]:
A__ = self.model_tester.prepare_config_and_inputs()
A__ = UMTaModel(config_and_inputs[0] ).to(SCREAMING_SNAKE_CASE__ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
SCREAMING_SNAKE_CASE__ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=SCREAMING_SNAKE_CASE__ , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def snake_case__ ( self ) -> Dict:
A__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> List[str]:
A__ = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
A__ = self.model_tester.prepare_config_and_inputs()
A__ = config_and_inputs[0]
A__ = UMTaForConditionalGeneration(SCREAMING_SNAKE_CASE__ ).eval()
model.to(SCREAMING_SNAKE_CASE__ )
A__ = {
"head_mask": torch.zeros(config.num_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ),
"decoder_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ),
"cross_attn_head_mask": torch.zeros(config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ ),
}
for attn_name, (name, mask) in zip(SCREAMING_SNAKE_CASE__ , head_masking.items() ):
A__ = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
A__ = torch.ones(
config.num_decoder_layers , config.num_heads , device=SCREAMING_SNAKE_CASE__ )
A__ = model.generate(
config_and_inputs[1]["input_ids"] , num_beams=1 , max_length=3 , output_attentions=SCREAMING_SNAKE_CASE__ , return_dict_in_generate=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
A__ = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def snake_case__ ( self ) -> List[Any]:
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
"Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged" )
def snake_case__ ( self ) -> int:
A__ = UMTaForConditionalGeneration.from_pretrained("google/umt5-small" , return_dict=SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
A__ = AutoTokenizer.from_pretrained("google/umt5-small" , use_fast=SCREAMING_SNAKE_CASE__ , legacy=SCREAMING_SNAKE_CASE__ )
A__ = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
A__ = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors="pt" , padding=SCREAMING_SNAKE_CASE__ ).input_ids
# fmt: off
A__ = torch.tensor(
[
[ 38530, 210703, 256299, 1410, 256298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 25922, 256299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 19014, 10620, 758, 256299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 256299, 14869, 281, 301, 256298, 275, 119983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 256299, 14869, 281, 2234, 289, 2275, 333,61391, 289, 256298, 543, 256297, 168714, 329, 256296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
A__ = model.generate(input_ids.to(SCREAMING_SNAKE_CASE__ ) )
A__ = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
A__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
| 562 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
UpperCamelCase = {
"""configuration_wav2vec2""": ["""WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Wav2Vec2Config"""],
"""feature_extraction_wav2vec2""": ["""Wav2Vec2FeatureExtractor"""],
"""processing_wav2vec2""": ["""Wav2Vec2Processor"""],
"""tokenization_wav2vec2""": ["""Wav2Vec2CTCTokenizer""", """Wav2Vec2Tokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Wav2Vec2ForAudioFrameClassification""",
"""Wav2Vec2ForCTC""",
"""Wav2Vec2ForMaskedLM""",
"""Wav2Vec2ForPreTraining""",
"""Wav2Vec2ForSequenceClassification""",
"""Wav2Vec2ForXVector""",
"""Wav2Vec2Model""",
"""Wav2Vec2PreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWav2Vec2ForCTC""",
"""TFWav2Vec2Model""",
"""TFWav2Vec2PreTrainedModel""",
"""TFWav2Vec2ForSequenceClassification""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
"""FlaxWav2Vec2ForCTC""",
"""FlaxWav2Vec2ForPreTraining""",
"""FlaxWav2Vec2Model""",
"""FlaxWav2Vec2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 562 | 1 |
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ) -> Union[str, Any]:
UpperCAmelCase_ , UpperCAmelCase_ = [], []
while len(__UpperCamelCase ) > 1:
UpperCAmelCase_ , UpperCAmelCase_ = min(__UpperCamelCase ), max(__UpperCamelCase )
start.append(__UpperCamelCase )
end.append(__UpperCamelCase )
collection.remove(__UpperCamelCase )
collection.remove(__UpperCamelCase )
end.reverse()
return start + collection + end
if __name__ == "__main__":
_lowerCamelCase = input('Enter numbers separated by a comma:\n').strip()
_lowerCamelCase = [int(item) for item in user_input.split(',')]
print(*merge_sort(unsorted), sep=',')
| 144 |
from __future__ import annotations
import unittest
from transformers import BlenderbotSmallConfig, BlenderbotSmallTokenizer, is_tf_available
from transformers.testing_utils import require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel
@require_tf
class a :
'''simple docstring'''
lowerCAmelCase : Dict = BlenderbotSmallConfig
lowerCAmelCase : Optional[Any] = {}
lowerCAmelCase : str = 'gelu'
def __init__( self : int , __snake_case : Any , __snake_case : str=13 , __snake_case : int=7 , __snake_case : str=True , __snake_case : List[str]=False , __snake_case : Optional[Any]=99 , __snake_case : Optional[Any]=32 , __snake_case : str=2 , __snake_case : Optional[int]=4 , __snake_case : int=37 , __snake_case : Any=0.1 , __snake_case : List[str]=0.1 , __snake_case : Tuple=20 , __snake_case : Dict=2 , __snake_case : str=1 , __snake_case : Any=0 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = eos_token_id
UpperCAmelCase_ = pad_token_id
UpperCAmelCase_ = bos_token_id
def lowerCamelCase_ ( self : Optional[Any] ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase_ = prepare_blenderbot_small_inputs_dict(__snake_case , __snake_case , __snake_case )
return config, inputs_dict
def lowerCamelCase_ ( self : str , __snake_case : Tuple , __snake_case : Any ):
UpperCAmelCase_ = TFBlenderbotSmallModel(config=__snake_case ).get_decoder()
UpperCAmelCase_ = inputs_dict['''input_ids''']
UpperCAmelCase_ = input_ids[:1, :]
UpperCAmelCase_ = inputs_dict['''attention_mask'''][:1, :]
UpperCAmelCase_ = inputs_dict['''head_mask''']
UpperCAmelCase_ = 1
# first forward pass
UpperCAmelCase_ = model(__snake_case , attention_mask=__snake_case , head_mask=__snake_case , use_cache=__snake_case )
UpperCAmelCase_ , UpperCAmelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase_ = model(__snake_case , attention_mask=__snake_case )[0]
UpperCAmelCase_ = model(__snake_case , attention_mask=__snake_case , past_key_values=__snake_case )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(__snake_case , __snake_case , rtol=1E-3 )
def SCREAMING_SNAKE_CASE ( __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Any=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : List[Any]=None , ) -> str:
if attention_mask is None:
UpperCAmelCase_ = tf.cast(tf.math.not_equal(__UpperCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class a ( _A , _A , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : Optional[int] = (
(TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel) if is_tf_available() else ()
)
lowerCAmelCase : Optional[Any] = (TFBlenderbotSmallForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase : Union[str, Any] = (
{
'conversational': TFBlenderbotSmallForConditionalGeneration,
'feature-extraction': TFBlenderbotSmallModel,
'summarization': TFBlenderbotSmallForConditionalGeneration,
'text2text-generation': TFBlenderbotSmallForConditionalGeneration,
'translation': TFBlenderbotSmallForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase : List[Any] = True
lowerCAmelCase : Tuple = False
lowerCAmelCase : List[Any] = False
def lowerCamelCase_ ( self : Optional[Any] ):
UpperCAmelCase_ = TFBlenderbotSmallModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=__snake_case )
def lowerCamelCase_ ( self : Any ):
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self : int ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*__snake_case )
@require_tokenizers
@require_tf
class a ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase : int = [
'Social anxiety\nWow, I am never shy. Do you have anxiety?\nYes. I end up sweating and blushing and feel like '
' i\'m going to throw up.\nand why is that?'
]
lowerCAmelCase : Optional[Any] = 'facebook/blenderbot_small-90M'
@cached_property
def lowerCamelCase_ ( self : List[str] ):
# use "old" tokenizer here because of bug when downloading new tokenizer
return BlenderbotSmallTokenizer.from_pretrained('''facebook/blenderbot-90M''' )
@cached_property
def lowerCamelCase_ ( self : Dict ):
UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
@slow
def lowerCamelCase_ ( self : List[str] ):
UpperCAmelCase_ = self.tokenizer(self.src_text , return_tensors='''tf''' )
UpperCAmelCase_ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=__snake_case , )
UpperCAmelCase_ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=__snake_case )[0]
assert generated_words in (
"i don't know. i just feel like i'm going to throw up. it's not fun.",
"i'm not sure. i just feel like i've been feeling like i have to be in a certain place",
"i'm not sure. i just feel like i've been in a bad situation.",
)
| 144 | 1 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def _a ( ):
print("Making key files..." )
make_key_files("rsa" , 1024 )
print("Key files generation successful." )
def _a ( _SCREAMING_SNAKE_CASE : int ):
print("Generating prime p..." )
_SCREAMING_SNAKE_CASE = rabinMiller.generate_large_prime(_SCREAMING_SNAKE_CASE )
print("Generating prime q..." )
_SCREAMING_SNAKE_CASE = rabinMiller.generate_large_prime(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
while True:
_SCREAMING_SNAKE_CASE = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(_SCREAMING_SNAKE_CASE , (p - 1) * (q - 1) ) == 1:
break
print("Calculating d that is mod inverse of e..." )
_SCREAMING_SNAKE_CASE = cryptoMath.find_mod_inverse(_SCREAMING_SNAKE_CASE , (p - 1) * (q - 1) )
_SCREAMING_SNAKE_CASE = (n, e)
_SCREAMING_SNAKE_CASE = (n, d)
return (public_key, private_key)
def _a ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ):
if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
print("\nWARNING:" )
print(
F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
"Use a different name or delete these files and re-run this program." )
sys.exit()
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = generate_key(_SCREAMING_SNAKE_CASE )
print(F'\nWriting public key to file {name}_pubkey.txt...' )
with open(F'{name}_pubkey.txt' , "w" ) as out_file:
out_file.write(F'{key_size},{public_key[0]},{public_key[1]}' )
print(F'Writing private key to file {name}_privkey.txt...' )
with open(F'{name}_privkey.txt' , "w" ) as out_file:
out_file.write(F'{key_size},{private_key[0]},{private_key[1]}' )
if __name__ == "__main__":
main() | 718 |
'''simple docstring'''
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def _a ( ):
print("Making key files..." )
make_key_files("rsa" , 1024 )
print("Key files generation successful." )
def _a ( _SCREAMING_SNAKE_CASE : int ):
print("Generating prime p..." )
_SCREAMING_SNAKE_CASE = rabinMiller.generate_large_prime(_SCREAMING_SNAKE_CASE )
print("Generating prime q..." )
_SCREAMING_SNAKE_CASE = rabinMiller.generate_large_prime(_SCREAMING_SNAKE_CASE )
_SCREAMING_SNAKE_CASE = p * q
print("Generating e that is relatively prime to (p - 1) * (q - 1)..." )
while True:
_SCREAMING_SNAKE_CASE = random.randrange(2 ** (key_size - 1) , 2 ** (key_size) )
if cryptoMath.gcd(_SCREAMING_SNAKE_CASE , (p - 1) * (q - 1) ) == 1:
break
print("Calculating d that is mod inverse of e..." )
_SCREAMING_SNAKE_CASE = cryptoMath.find_mod_inverse(_SCREAMING_SNAKE_CASE , (p - 1) * (q - 1) )
_SCREAMING_SNAKE_CASE = (n, e)
_SCREAMING_SNAKE_CASE = (n, d)
return (public_key, private_key)
def _a ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : int ):
if os.path.exists(F'{name}_pubkey.txt' ) or os.path.exists(F'{name}_privkey.txt' ):
print("\nWARNING:" )
print(
F'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
"Use a different name or delete these files and re-run this program." )
sys.exit()
_SCREAMING_SNAKE_CASE, _SCREAMING_SNAKE_CASE = generate_key(_SCREAMING_SNAKE_CASE )
print(F'\nWriting public key to file {name}_pubkey.txt...' )
with open(F'{name}_pubkey.txt' , "w" ) as out_file:
out_file.write(F'{key_size},{public_key[0]},{public_key[1]}' )
print(F'Writing private key to file {name}_privkey.txt...' )
with open(F'{name}_privkey.txt' , "w" ) as out_file:
out_file.write(F'{key_size},{private_key[0]},{private_key[1]}' )
if __name__ == "__main__":
main() | 493 | 0 |
from math import loga
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
if a < 0:
raise ValueError('Input value must be a positive integer' )
elif isinstance(__UpperCamelCase , __UpperCamelCase ):
raise TypeError('Input value must be a \'int\' type' )
return 0 if (a == 0) else int(loga(a & -a ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 669 |
from ..utils import DummyObject, requires_backends
class a__ ( metaclass=UpperCamelCase__ ):
a : int = ["""torch""", """scipy"""]
def __init__( self , *A , **A ) -> str:
'''simple docstring'''
requires_backends(self , ["torch", "scipy"] )
@classmethod
def lowerCAmelCase_ ( cls , *A , **A ) -> Any:
'''simple docstring'''
requires_backends(cls , ["torch", "scipy"] )
@classmethod
def lowerCAmelCase_ ( cls , *A , **A ) -> Optional[int]:
'''simple docstring'''
requires_backends(cls , ["torch", "scipy"] )
| 515 | 0 |
'''simple docstring'''
import argparse
import collections
import json
from pathlib import Path
import requests
import torch
import yaml
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTImageProcessor,
MobileViTVaConfig,
MobileViTVaForImageClassification,
MobileViTVaForSemanticSegmentation,
)
from transformers.utils import logging
logging.set_verbosity_info()
A_ = logging.get_logger(__name__)
def _UpperCamelCase ( __UpperCamelCase ) -> Optional[int]:
print('Loading config file...' )
def flatten_yaml_as_dict(__UpperCamelCase ,__UpperCamelCase="" ,__UpperCamelCase="." ):
lowerCamelCase_ = []
for k, v in d.items():
lowerCamelCase_ = parent_key + sep + k if parent_key else k
if isinstance(__UpperCamelCase ,collections.abc.MutableMapping ):
items.extend(flatten_yaml_as_dict(__UpperCamelCase ,__UpperCamelCase ,sep=__UpperCamelCase ).items() )
else:
items.append((new_key, v) )
return dict(__UpperCamelCase )
lowerCamelCase_ = argparse.Namespace()
with open(__UpperCamelCase ,'r' ) as yaml_file:
try:
lowerCamelCase_ = yaml.load(__UpperCamelCase ,Loader=yaml.FullLoader )
lowerCamelCase_ = flatten_yaml_as_dict(__UpperCamelCase )
for k, v in flat_cfg.items():
setattr(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
except yaml.YAMLError as exc:
logger.error('Error while loading config file: {}. Error message: {}'.format(__UpperCamelCase ,str(__UpperCamelCase ) ) )
return config
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
lowerCamelCase_ = MobileViTVaConfig()
lowerCamelCase_ = False
# dataset
if task_name.startswith('imagenet1k_' ):
lowerCamelCase_ = 10_00
if int(task_name.strip().split('_' )[-1] ) == 3_84:
lowerCamelCase_ = 3_84
else:
lowerCamelCase_ = 2_56
lowerCamelCase_ = 'imagenet-1k-id2label.json'
elif task_name.startswith('imagenet21k_to_1k_' ):
lowerCamelCase_ = 2_10_00
if int(task_name.strip().split('_' )[-1] ) == 3_84:
lowerCamelCase_ = 3_84
else:
lowerCamelCase_ = 2_56
lowerCamelCase_ = 'imagenet-22k-id2label.json'
elif task_name.startswith('ade20k_' ):
lowerCamelCase_ = 1_51
lowerCamelCase_ = 5_12
lowerCamelCase_ = 'ade20k-id2label.json'
lowerCamelCase_ = True
elif task_name.startswith('voc_' ):
lowerCamelCase_ = 21
lowerCamelCase_ = 5_12
lowerCamelCase_ = 'pascal-voc-id2label.json'
lowerCamelCase_ = True
# orig_config
lowerCamelCase_ = load_orig_config_file(__UpperCamelCase )
assert getattr(__UpperCamelCase ,'model.classification.name' ,-1 ) == "mobilevit_v2", "Invalid model"
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.classification.mitv2.width_multiplier' ,1.0 )
assert (
getattr(__UpperCamelCase ,'model.classification.mitv2.attn_norm_layer' ,-1 ) == "layer_norm_2d"
), "Norm layers other than layer_norm_2d is not supported"
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.classification.activation.name' ,'swish' )
# config.image_size == getattr(orig_config, 'sampler.bs.crop_size_width', 256)
if is_segmentation_model:
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.output_stride' ,16 )
if "_deeplabv3" in task_name:
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_rates' ,[12, 24, 36] )
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_out_channels' ,5_12 )
lowerCamelCase_ = getattr(__UpperCamelCase ,'model.segmentation.deeplabv3.aspp_dropout' ,0.1 )
# id2label
lowerCamelCase_ = 'huggingface/label-files'
lowerCamelCase_ = json.load(open(hf_hub_download(__UpperCamelCase ,__UpperCamelCase ,repo_type='dataset' ) ,'r' ) )
lowerCamelCase_ = {int(__UpperCamelCase ): v for k, v in idalabel.items()}
lowerCamelCase_ = idalabel
lowerCamelCase_ = {v: k for k, v in idalabel.items()}
return config
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[int]:
lowerCamelCase_ = dct.pop(__UpperCamelCase )
lowerCamelCase_ = val
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase=False ) -> int:
if base_model:
lowerCamelCase_ = ''
else:
lowerCamelCase_ = 'mobilevitv2.'
lowerCamelCase_ = []
for k in state_dict.keys():
if k[:8] == "encoder.":
lowerCamelCase_ = k[8:]
else:
lowerCamelCase_ = k
if ".block." in k:
lowerCamelCase_ = k_new.replace('.block.' ,'.' )
if ".conv." in k:
lowerCamelCase_ = k_new.replace('.conv.' ,'.convolution.' )
if ".norm." in k:
lowerCamelCase_ = k_new.replace('.norm.' ,'.normalization.' )
if "conv_1." in k:
lowerCamelCase_ = k_new.replace('conv_1.' ,f'''{model_prefix}conv_stem.''' )
for i in [1, 2]:
if f'''layer_{i}.''' in k:
lowerCamelCase_ = k_new.replace(f'''layer_{i}.''' ,f'''{model_prefix}encoder.layer.{i-1}.layer.''' )
if ".exp_1x1." in k:
lowerCamelCase_ = k_new.replace('.exp_1x1.' ,'.expand_1x1.' )
if ".red_1x1." in k:
lowerCamelCase_ = k_new.replace('.red_1x1.' ,'.reduce_1x1.' )
for i in [3, 4, 5]:
if f'''layer_{i}.0.''' in k:
lowerCamelCase_ = k_new.replace(f'''layer_{i}.0.''' ,f'''{model_prefix}encoder.layer.{i-1}.downsampling_layer.''' )
if f'''layer_{i}.1.local_rep.0.''' in k:
lowerCamelCase_ = k_new.replace(f'''layer_{i}.1.local_rep.0.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_kxk.''' )
if f'''layer_{i}.1.local_rep.1.''' in k:
lowerCamelCase_ = k_new.replace(f'''layer_{i}.1.local_rep.1.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_1x1.''' )
for i in [3, 4, 5]:
if i == 3:
lowerCamelCase_ = [0, 1]
elif i == 4:
lowerCamelCase_ = [0, 1, 2, 3]
elif i == 5:
lowerCamelCase_ = [0, 1, 2]
for j in j_in:
if f'''layer_{i}.1.global_rep.{j}.''' in k:
lowerCamelCase_ = k_new.replace(
f'''layer_{i}.1.global_rep.{j}.''' ,f'''{model_prefix}encoder.layer.{i-1}.transformer.layer.{j}.''' )
if f'''layer_{i}.1.global_rep.{j+1}.''' in k:
lowerCamelCase_ = k_new.replace(
f'''layer_{i}.1.global_rep.{j+1}.''' ,f'''{model_prefix}encoder.layer.{i-1}.layernorm.''' )
if f'''layer_{i}.1.conv_proj.''' in k:
lowerCamelCase_ = k_new.replace(f'''layer_{i}.1.conv_proj.''' ,f'''{model_prefix}encoder.layer.{i-1}.conv_projection.''' )
if "pre_norm_attn.0." in k:
lowerCamelCase_ = k_new.replace('pre_norm_attn.0.' ,'layernorm_before.' )
if "pre_norm_attn.1." in k:
lowerCamelCase_ = k_new.replace('pre_norm_attn.1.' ,'attention.' )
if "pre_norm_ffn.0." in k:
lowerCamelCase_ = k_new.replace('pre_norm_ffn.0.' ,'layernorm_after.' )
if "pre_norm_ffn.1." in k:
lowerCamelCase_ = k_new.replace('pre_norm_ffn.1.' ,'ffn.conv1.' )
if "pre_norm_ffn.3." in k:
lowerCamelCase_ = k_new.replace('pre_norm_ffn.3.' ,'ffn.conv2.' )
if "classifier.1." in k:
lowerCamelCase_ = k_new.replace('classifier.1.' ,'classifier.' )
if "seg_head." in k:
lowerCamelCase_ = k_new.replace('seg_head.' ,'segmentation_head.' )
if ".aspp_layer." in k:
lowerCamelCase_ = k_new.replace('.aspp_layer.' ,'.' )
if ".aspp_pool." in k:
lowerCamelCase_ = k_new.replace('.aspp_pool.' ,'.' )
rename_keys.append((k, k_new) )
return rename_keys
def _UpperCamelCase ( __UpperCamelCase ) -> Dict:
lowerCamelCase_ = []
for k in state_dict.keys():
if k.startswith('seg_head.aux_head.' ):
keys_to_ignore.append(__UpperCamelCase )
for k in keys_to_ignore:
state_dict.pop(__UpperCamelCase ,__UpperCamelCase )
def _UpperCamelCase ( ) -> Any:
lowerCamelCase_ = 'http://images.cocodataset.org/val2017/000000039769.jpg'
# url = "https://cdn.britannica.com/86/141086-050-9D7C75EE/Gulfstream-G450-business-jet-passengers.jpg"
lowerCamelCase_ = Image.open(requests.get(__UpperCamelCase ,stream=__UpperCamelCase ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase ) -> Optional[Any]:
lowerCamelCase_ = get_mobilevitva_config(__UpperCamelCase ,__UpperCamelCase )
# load original state_dict
lowerCamelCase_ = torch.load(__UpperCamelCase ,map_location='cpu' )
# load huggingface model
if task_name.startswith('ade20k_' ) or task_name.startswith('voc_' ):
lowerCamelCase_ = MobileViTVaForSemanticSegmentation(__UpperCamelCase ).eval()
lowerCamelCase_ = False
else:
lowerCamelCase_ = MobileViTVaForImageClassification(__UpperCamelCase ).eval()
lowerCamelCase_ = False
# remove and rename some keys of load the original model
lowerCamelCase_ = checkpoint
remove_unused_keys(__UpperCamelCase )
lowerCamelCase_ = create_rename_keys(__UpperCamelCase ,base_model=__UpperCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
# load modified state_dict
model.load_state_dict(__UpperCamelCase )
# Check outputs on an image, prepared by MobileViTImageProcessor
lowerCamelCase_ = MobileViTImageProcessor(crop_size=config.image_size ,size=config.image_size + 32 )
lowerCamelCase_ = image_processor(images=prepare_img() ,return_tensors='pt' )
lowerCamelCase_ = model(**__UpperCamelCase )
# verify classification model
if task_name.startswith('imagenet' ):
lowerCamelCase_ = outputs.logits
lowerCamelCase_ = logits.argmax(-1 ).item()
print('Predicted class:' ,model.config.idalabel[predicted_class_idx] )
if task_name.startswith('imagenet1k_256' ) and config.width_multiplier == 1.0:
# expected_logits for base variant
lowerCamelCase_ = torch.tensor([-1.6336e00, -7.3204e-02, -5.1883e-01] )
assert torch.allclose(logits[0, :3] ,__UpperCamelCase ,atol=1e-4 )
Path(__UpperCamelCase ).mkdir(exist_ok=__UpperCamelCase )
print(f'''Saving model {task_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(__UpperCamelCase )
print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(__UpperCamelCase )
if __name__ == "__main__":
A_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--task",
default="imagenet1k_256",
type=str,
help=(
"Name of the task for which the MobileViTV2 model you'd like to convert is trained on . "
"\n Classification (ImageNet-1k)\n - MobileViTV2 (256x256) : imagenet1k_256\n - MobileViTV2 (Trained on 256x256 and Finetuned on 384x384) : imagenet1k_384\n - MobileViTV2 (Trained on ImageNet-21k and Finetuned on ImageNet-1k 256x256) :\n imagenet21k_to_1k_256\n - MobileViTV2 (Trained on ImageNet-21k, Finetuned on ImageNet-1k 256x256, and Finetuned on\n ImageNet-1k 384x384) : imagenet21k_to_1k_384\n Segmentation\n - ADE20K Dataset : ade20k_deeplabv3\n - Pascal VOC 2012 Dataset: voc_deeplabv3\n "
),
choices=[
"imagenet1k_256",
"imagenet1k_384",
"imagenet21k_to_1k_256",
"imagenet21k_to_1k_384",
"ade20k_deeplabv3",
"voc_deeplabv3",
],
)
parser.add_argument(
"--orig_checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument("--orig_config_path", required=True, type=str, help="Path to the original config file.")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
A_ = parser.parse_args()
convert_mobilevitva_checkpoint(
args.task, args.orig_checkpoint_path, args.orig_config_path, args.pytorch_dump_folder_path
)
| 384 |
'''simple docstring'''
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
A_ = "\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n"
A_ = "\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n"
A_ = "\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for 'cvit-mkb-clsr' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for 'cvit-mkb-clsr' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n \"accuracy\": Accuracy\n \"f1\": F1 score\n \"precision\": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wnli') # 'wnli' or any of [\"copa\", \"sna\", \"csqa\", \"wstp\", \"inltkh\", \"bbca\", \"iitp-mr\", \"iitp-pr\", \"actsa-sc\", \"md\"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'wiki-ner')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'accuracy': 1.0, 'f1': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric('indic_glue', 'cvit-mkb-clsr')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {'precision@10': 1.0}\n\n"
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
return float((preds == labels).mean() )
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> str:
lowerCamelCase_ = simple_accuracy(__UpperCamelCase ,__UpperCamelCase )
lowerCamelCase_ = float(fa_score(y_true=__UpperCamelCase ,y_pred=__UpperCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def _UpperCamelCase ( __UpperCamelCase ,__UpperCamelCase ) -> List[str]:
lowerCamelCase_ = np.array(__UpperCamelCase )
lowerCamelCase_ = np.array(__UpperCamelCase )
lowerCamelCase_ = en_sentvecs.shape[0]
# mean centering
lowerCamelCase_ = en_sentvecs - np.mean(__UpperCamelCase ,axis=0 )
lowerCamelCase_ = in_sentvecs - np.mean(__UpperCamelCase ,axis=0 )
lowerCamelCase_ = cdist(__UpperCamelCase ,__UpperCamelCase ,'cosine' )
lowerCamelCase_ = np.array(range(__UpperCamelCase ) )
lowerCamelCase_ = sim.argsort(axis=1 )[:, :10]
lowerCamelCase_ = np.any(preds == actual[:, None] ,axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase ( datasets.Metric ):
'''simple docstring'''
def UpperCamelCase( self ) -> Tuple:
'''simple docstring'''
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
'references': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='numpy' if self.config_name != 'cvit-mkb-clsr' else None , )
def UpperCamelCase( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
| 384 | 1 |
"""simple docstring"""
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
return params[f'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def lowerCAmelCase_ ( UpperCamelCase__ : Dict , UpperCamelCase__ : Any , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : Union[str, Any]="attention" ):
"""simple docstring"""
__lowercase = __lowercase = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
__lowercase = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
__lowercase = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
__lowercase = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
__lowercase = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
__lowercase = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
__lowercase = np.ascontiguousarray(params[f'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
__lowercase = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def lowerCAmelCase_ ( UpperCamelCase__ : int , UpperCamelCase__ : List[str] , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple=False ):
"""simple docstring"""
if split_mlp_wi:
__lowercase = params[f'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
__lowercase = params[f'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
__lowercase = (wi_a, wi_a)
else:
__lowercase = params[f'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
__lowercase = params[f'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def lowerCAmelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Dict ):
"""simple docstring"""
return params[f'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def lowerCAmelCase_ ( UpperCamelCase__ : dict , *, UpperCamelCase__ : int , UpperCamelCase__ : bool , UpperCamelCase__ : bool = False ):
"""simple docstring"""
__lowercase = traverse_util.flatten_dict(variables["""target"""] )
__lowercase = {"""/""".join(UpperCamelCase__ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
__lowercase = """encoder/encoder/mlp/wi_0/kernel""" in old
print("""Split MLP:""" , UpperCamelCase__ )
__lowercase = collections.OrderedDict()
# Shared embeddings.
__lowercase = old["""token_embedder/embedding"""]
# Encoder.
for i in range(UpperCamelCase__ ):
# Block i, layer 0 (Self Attention).
__lowercase = tax_layer_norm_lookup(UpperCamelCase__ , UpperCamelCase__ , """encoder""" , """pre_attention_layer_norm""" )
__lowercase , __lowercase , __lowercase , __lowercase = tax_attention_lookup(UpperCamelCase__ , UpperCamelCase__ , """encoder""" , """attention""" )
__lowercase = layer_norm
__lowercase = k.T
__lowercase = o.T
__lowercase = q.T
__lowercase = v.T
# Block i, layer 1 (MLP).
__lowercase = tax_layer_norm_lookup(UpperCamelCase__ , UpperCamelCase__ , """encoder""" , """pre_mlp_layer_norm""" )
__lowercase , __lowercase = tax_mlp_lookup(UpperCamelCase__ , UpperCamelCase__ , """encoder""" , UpperCamelCase__ )
__lowercase = layer_norm
if split_mlp_wi:
__lowercase = wi[0].T
__lowercase = wi[1].T
else:
__lowercase = wi.T
__lowercase = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__lowercase = tax_relpos_bias_lookup(
UpperCamelCase__ , UpperCamelCase__ , """encoder""" ).T
__lowercase = old["""encoder/encoder_norm/scale"""]
if not scalable_attention:
__lowercase = tax_relpos_bias_lookup(
UpperCamelCase__ , 0 , """encoder""" ).T
__lowercase = tax_relpos_bias_lookup(
UpperCamelCase__ , 0 , """decoder""" ).T
if not is_encoder_only:
# Decoder.
for i in range(UpperCamelCase__ ):
# Block i, layer 0 (Self Attention).
__lowercase = tax_layer_norm_lookup(UpperCamelCase__ , UpperCamelCase__ , """decoder""" , """pre_self_attention_layer_norm""" )
__lowercase , __lowercase , __lowercase , __lowercase = tax_attention_lookup(UpperCamelCase__ , UpperCamelCase__ , """decoder""" , """self_attention""" )
__lowercase = layer_norm
__lowercase = k.T
__lowercase = o.T
__lowercase = q.T
__lowercase = v.T
# Block i, layer 1 (Cross Attention).
__lowercase = tax_layer_norm_lookup(UpperCamelCase__ , UpperCamelCase__ , """decoder""" , """pre_cross_attention_layer_norm""" )
__lowercase , __lowercase , __lowercase , __lowercase = tax_attention_lookup(UpperCamelCase__ , UpperCamelCase__ , """decoder""" , """encoder_decoder_attention""" )
__lowercase = layer_norm
__lowercase = k.T
__lowercase = o.T
__lowercase = q.T
__lowercase = v.T
# Block i, layer 2 (MLP).
__lowercase = tax_layer_norm_lookup(UpperCamelCase__ , UpperCamelCase__ , """decoder""" , """pre_mlp_layer_norm""" )
__lowercase , __lowercase = tax_mlp_lookup(UpperCamelCase__ , UpperCamelCase__ , """decoder""" , UpperCamelCase__ )
__lowercase = layer_norm
if split_mlp_wi:
__lowercase = wi[0].T
__lowercase = wi[1].T
else:
__lowercase = wi.T
__lowercase = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
__lowercase = tax_relpos_bias_lookup(UpperCamelCase__ , UpperCamelCase__ , """decoder""" ).T
__lowercase = old["""decoder/decoder_norm/scale"""]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
__lowercase = old["""decoder/logits_dense/kernel"""].T
return new
def lowerCAmelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : bool ):
"""simple docstring"""
__lowercase = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
__lowercase = state_dict["""shared.weight"""]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
__lowercase = state_dict["""shared.weight"""]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("""Using shared word embeddings as lm_head.""" )
__lowercase = state_dict["""shared.weight"""]
return state_dict
def lowerCAmelCase_ ( UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Tuple , UpperCamelCase__ : Optional[Any] , UpperCamelCase__ : Any , UpperCamelCase__ : Tuple ):
"""simple docstring"""
__lowercase = checkpoints.load_tax_checkpoint(UpperCamelCase__ )
__lowercase = convert_tax_to_pytorch(
UpperCamelCase__ , num_layers=config.num_layers , is_encoder_only=UpperCamelCase__ , scalable_attention=UpperCamelCase__ )
__lowercase = make_state_dict(UpperCamelCase__ , UpperCamelCase__ )
model.load_state_dict(UpperCamelCase__ , strict=UpperCamelCase__ )
def lowerCAmelCase_ ( UpperCamelCase__ : Tuple , UpperCamelCase__ : Any , UpperCamelCase__ : List[Any] , UpperCamelCase__ : bool = False , UpperCamelCase__ : bool = False , ):
"""simple docstring"""
__lowercase = MTaConfig.from_json_file(UpperCamelCase__ )
print(f'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
__lowercase = UMTaEncoderModel(UpperCamelCase__ )
else:
__lowercase = UMTaForConditionalGeneration(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(UpperCamelCase__ )
# Verify that we can load the checkpoint.
model.from_pretrained(UpperCamelCase__ )
print("""Done""" )
if __name__ == "__main__":
UpperCAmelCase__ =argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
parser.add_argument(
"--scalable_attention",
action="store_true",
help="Whether the model uses scaled attention (umt5 model)",
default=False,
)
UpperCAmelCase__ =parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 616 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowerCamelCase__ ( _a , _a , unittest.TestCase ):
a : Any = IFPipeline
a : str = TEXT_TO_IMAGE_PARAMS - {"""width""", """height""", """latents"""}
a : Dict = TEXT_TO_IMAGE_BATCH_PARAMS
a : int = PipelineTesterMixin.required_optional_params - {"""latents"""}
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
return self._get_dummy_components()
def SCREAMING_SNAKE_CASE_ ( self : Any , A_ : int , A_ : Dict=0 ):
'''simple docstring'''
if str(A_ ).startswith("""mps""" ):
__lowercase = torch.manual_seed(A_ )
else:
__lowercase = torch.Generator(device=A_ ).manual_seed(A_ )
__lowercase = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""output_type""": """numpy""",
}
return inputs
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != """cuda""" , reason="""float16 requires CUDA""" )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
super().test_save_load_floataa(expected_max_diff=1e-1 )
def SCREAMING_SNAKE_CASE_ ( self : List[Any] ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(expected_max_diff=1e-2 )
def SCREAMING_SNAKE_CASE_ ( self : List[str] ):
'''simple docstring'''
self._test_save_load_local()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
self._test_inference_batch_single_identical(
expected_max_diff=1e-2 , )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3 )
@slow
@require_torch_gpu
class lowerCamelCase__ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ):
'''simple docstring'''
__lowercase = IFPipeline.from_pretrained("""DeepFloyd/IF-I-XL-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa )
__lowercase = IFSuperResolutionPipeline.from_pretrained(
"""DeepFloyd/IF-II-L-v1.0""" , variant="""fp16""" , torch_dtype=torch.floataa , text_encoder=A_ , tokenizer=A_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to("""cuda""" )
__lowercase , __lowercase = pipe_a.encode_prompt("""anime turtle""" , device="""cuda""" )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
__lowercase = None
__lowercase = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(A_ , A_ , A_ , A_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
__lowercase = IFImgaImgPipeline(**pipe_a.components )
__lowercase = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(A_ , A_ , A_ , A_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
__lowercase = IFInpaintingPipeline(**pipe_a.components )
__lowercase = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(A_ , A_ , A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] , A_ : Any , A_ : int , A_ : str , A_ : Dict ):
'''simple docstring'''
_start_torch_memory_measurement()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , num_inference_steps=2 , generator=A_ , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy""" )
assert_mean_pixel_difference(A_ , A_ )
# pipeline 2
_start_torch_memory_measurement()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] , A_ : Tuple , A_ : List[Any] , A_ : List[Any] , A_ : Any ):
'''simple docstring'''
_start_torch_memory_measurement()
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , num_inference_steps=2 , generator=A_ , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy""" )
assert_mean_pixel_difference(A_ , A_ )
# pipeline 2
_start_torch_memory_measurement()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(A_ )
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , original_image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A_ , A_ )
def SCREAMING_SNAKE_CASE_ ( self : List[str] , A_ : List[Any] , A_ : str , A_ : List[Any] , A_ : List[Any] ):
'''simple docstring'''
_start_torch_memory_measurement()
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(A_ )
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , mask_image=A_ , num_inference_steps=2 , generator=A_ , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (6_4, 6_4, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy""" )
assert_mean_pixel_difference(A_ , A_ )
# pipeline 2
_start_torch_memory_measurement()
__lowercase = torch.Generator(device="""cpu""" ).manual_seed(0 )
__lowercase = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(A_ )
__lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(A_ )
__lowercase = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(A_ )
__lowercase = pipe_a(
prompt_embeds=A_ , negative_prompt_embeds=A_ , image=A_ , mask_image=A_ , original_image=A_ , generator=A_ , num_inference_steps=2 , output_type="""np""" , )
__lowercase = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
__lowercase = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
__lowercase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy""" )
assert_mean_pixel_difference(A_ , A_ )
def lowerCAmelCase_ ( ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
| 616 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = '''▁'''
__snake_case = {'''vocab_file''': '''spiece.model'''}
__snake_case = {
'''vocab_file''': {
'''google/reformer-crime-and-punishment''': (
'''https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model'''
)
}
}
__snake_case = {
'''google/reformer-crime-and-punishment''': 524288,
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[Any] = VOCAB_FILES_NAMES
A_ : Any = PRETRAINED_VOCAB_FILES_MAP
A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self , __UpperCAmelCase , __UpperCAmelCase="</s>" , __UpperCAmelCase="<unk>" , __UpperCAmelCase=[] , __UpperCAmelCase = None , **__UpperCAmelCase , ) -> None:
_a = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCAmelCase , )
_a = vocab_file
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCAmelCase )
@property
def _UpperCAmelCase ( self ) -> Union[str, Any]:
return self.sp_model.get_piece_size()
def _UpperCAmelCase ( self ) -> Dict[str, int]:
_a = {self.convert_ids_to_tokens(__UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self ) -> Union[str, Any]:
_a = self.__dict__.copy()
_a = None
return state
def __setstate__( self , __UpperCAmelCase ) -> Optional[Any]:
_a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a = {}
_a = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[str]:
return self.sp_model.encode(__UpperCAmelCase , out_type=__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Any:
return self.sp_model.piece_to_id(__UpperCAmelCase )
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> Tuple:
if index < self.sp_model.get_piece_size():
_a = self.sp_model.IdToPiece(__UpperCAmelCase )
return token
def _UpperCAmelCase ( self , __UpperCAmelCase ) -> List[Any]:
_a = []
_a = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(__UpperCAmelCase ) + token
_a = []
else:
current_sub_tokens.append(__UpperCAmelCase )
out_string += self.sp_model.decode(__UpperCAmelCase )
return out_string.strip()
def _UpperCAmelCase ( self , __UpperCAmelCase , __UpperCAmelCase = None ) -> Tuple[str]:
if not os.path.isdir(__UpperCAmelCase ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
_a = os.path.join(
__UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCAmelCase , '''wb''' ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(__UpperCAmelCase )
return (out_vocab_file,) | 285 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__snake_case = {'''configuration_vit_mae''': ['''VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ViTMAEConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ViTMAEForPreTraining''',
'''ViTMAELayer''',
'''ViTMAEModel''',
'''ViTMAEPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case = [
'''TFViTMAEForPreTraining''',
'''TFViTMAEModel''',
'''TFViTMAEPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_vit_mae import VIT_MAE_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMAEConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_mae import (
VIT_MAE_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMAEForPreTraining,
ViTMAELayer,
ViTMAEModel,
ViTMAEPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit_mae import TFViTMAEForPreTraining, TFViTMAEModel, TFViTMAEPreTrainedModel
else:
import sys
__snake_case = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 285 | 1 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class __a :
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE : Dict=2 , SCREAMING_SNAKE_CASE : Optional[int]=3 , SCREAMING_SNAKE_CASE : Optional[Any]=64 , SCREAMING_SNAKE_CASE : Any=None ):
'''simple docstring'''
UpperCamelCase__ : Optional[Any] = np.random.default_rng(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : Optional[int] = length
UpperCamelCase__ : List[str] = rng.normal(size=(length,) ).astype(np.floataa )
UpperCamelCase__ : Union[str, Any] = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : Any ):
'''simple docstring'''
return self.length
def __getitem__( self : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
'''simple docstring'''
return {"x": self.x[i], "y": self.y[i]}
class __a ( torch.nn.Module ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : str=0 , SCREAMING_SNAKE_CASE : int=0 , SCREAMING_SNAKE_CASE : Tuple=False ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ : int = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCamelCase__ : Optional[int] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
UpperCamelCase__ : Union[str, Any] = True
def __lowercase ( self : Any , SCREAMING_SNAKE_CASE : Tuple=None ):
'''simple docstring'''
if self.first_batch:
print(F'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
UpperCamelCase__ : Dict = False
return x * self.a[0] + self.b[0]
class __a ( torch.nn.Module ):
def __init__( self : Union[str, Any] , SCREAMING_SNAKE_CASE : Tuple=0 , SCREAMING_SNAKE_CASE : Union[str, Any]=0 , SCREAMING_SNAKE_CASE : Optional[int]=False ):
'''simple docstring'''
super().__init__()
UpperCamelCase__ : int = torch.nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE ).float() )
UpperCamelCase__ : Dict = torch.nn.Parameter(torch.tensor(SCREAMING_SNAKE_CASE ).float() )
UpperCamelCase__ : List[Any] = True
def __lowercase ( self : Tuple , SCREAMING_SNAKE_CASE : Optional[int]=None ):
'''simple docstring'''
if self.first_batch:
print(F'Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}' )
UpperCamelCase__ : Optional[int] = False
return x * self.a + self.b
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase = 16 ) -> Any:
from datasets import load_dataset
from transformers import AutoTokenizer
UpperCamelCase__ : List[Any] = AutoTokenizer.from_pretrained("bert-base-cased" )
UpperCamelCase__ : Any = {"train": "tests/test_samples/MRPC/train.csv", "validation": "tests/test_samples/MRPC/dev.csv"}
UpperCamelCase__ : List[str] = load_dataset("csv" , data_files=__lowerCAmelCase )
UpperCamelCase__ : Dict = datasets["train"].unique("label" )
UpperCamelCase__ : Optional[int] = {v: i for i, v in enumerate(__lowerCAmelCase )}
def tokenize_function(__lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
UpperCamelCase__ : Union[str, Any] = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=__lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length" )
if "label" in examples:
UpperCamelCase__ : Optional[Any] = [label_to_id[l] for l in examples["label"]]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
UpperCamelCase__ : Optional[Any] = datasets.map(
__lowerCAmelCase , batched=__lowerCAmelCase , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(__lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(__lowerCAmelCase , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(__lowerCAmelCase , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
UpperCamelCase__ : List[Any] = DataLoader(tokenized_datasets["train"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=2 )
UpperCamelCase__ : Union[str, Any] = DataLoader(tokenized_datasets["validation"] , shuffle=__lowerCAmelCase , collate_fn=__lowerCAmelCase , batch_size=1 )
return train_dataloader, eval_dataloader | 228 |
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase = 10 , __lowerCAmelCase = 22 ) -> int:
UpperCamelCase__ : Any = range(1 , __lowerCAmelCase )
UpperCamelCase__ : Any = range(1 , __lowerCAmelCase )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""") | 228 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_a : int = {
"""configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
"""tokenization_deberta""": ["""DebertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Optional[int] = ["""DebertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Union[str, Any] = [
"""DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DebertaForMaskedLM""",
"""DebertaForQuestionAnswering""",
"""DebertaForSequenceClassification""",
"""DebertaForTokenClassification""",
"""DebertaModel""",
"""DebertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_a : Any = [
"""TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDebertaForMaskedLM""",
"""TFDebertaForQuestionAnswering""",
"""TFDebertaForSequenceClassification""",
"""TFDebertaForTokenClassification""",
"""TFDebertaModel""",
"""TFDebertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
_a : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 716 |
from __future__ import annotations
import math
from collections.abc import Callable
def snake_case__ ( UpperCAmelCase : Callable[[int | float], int | float] , UpperCAmelCase : int | float , UpperCAmelCase : int | float , UpperCAmelCase : int = 1_0_0 , ):
lowerCAmelCase__ :int = x_start
lowerCAmelCase__ :int = fnc(UpperCAmelCase )
lowerCAmelCase__ :List[Any] = 0.0
for _ in range(UpperCAmelCase ):
# Approximates curve as a sequence of linear lines and sums their length
lowerCAmelCase__ :Dict = (x_end - x_start) / steps + xa
lowerCAmelCase__ :Optional[Any] = fnc(UpperCAmelCase )
length += math.hypot(xa - xa , fxa - fxa )
# Increment step
lowerCAmelCase__ :Union[str, Any] = xa
lowerCAmelCase__ :str = fxa
return length
if __name__ == "__main__":
def snake_case__ ( UpperCAmelCase : int ):
return math.sin(1_0 * x )
print("""f(x) = sin(10 * x)""")
print("""The length of the curve from x = -10 to x = 10 is:""")
_a : Any = 10
while i <= 10_0000:
print(f"""With {i} steps: {line_length(f, -10, 10, i)}""")
i *= 10
| 111 | 0 |
from collections import namedtuple
import requests
from lxml import html # type: ignore
__a: List[str] = namedtuple('''covid_data''', '''cases deaths recovered''')
def _SCREAMING_SNAKE_CASE ( __snake_case = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
_UpperCAmelCase = """//div[@class = \"maincounter-number\"]/span/text()"""
return covid_data(*html.fromstring(requests.get(__snake_case ).content ).xpath(__snake_case ) )
__a: Tuple = '''Total COVID-19 cases in the world: {}
Total deaths due to COVID-19 in the world: {}
Total COVID-19 patients recovered in the world: {}'''
print(fmt.format(*covid_stats())) | 108 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__a: Any = logging.get_logger(__name__)
__a: Dict = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
__a: int = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
__a: str = {'''facebook/blenderbot_small-90M''': 512}
def _SCREAMING_SNAKE_CASE ( __snake_case ) -> List[str]:
_UpperCAmelCase = set()
_UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCAmelCase = char
_UpperCAmelCase = set(__snake_case )
return pairs
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = ['''input_ids''', '''attention_mask''']
def __init__( self : List[Any] , lowerCamelCase : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : List[str]="__start__" , lowerCamelCase : List[Any]="__end__" , lowerCamelCase : Any="__unk__" , lowerCamelCase : Optional[Any]="__null__" , **lowerCamelCase : Optional[Any] , ) -> Tuple:
"""simple docstring"""
super().__init__(unk_token=lowerCamelCase , bos_token=lowerCamelCase , eos_token=lowerCamelCase , pad_token=lowerCamelCase , **lowerCamelCase )
with open(lowerCamelCase , encoding="""utf-8""" ) as vocab_handle:
_UpperCAmelCase = json.load(lowerCamelCase )
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
with open(lowerCamelCase , encoding="""utf-8""" ) as merges_handle:
_UpperCAmelCase = merges_handle.read().split("""\n""" )[1:-1]
_UpperCAmelCase = [tuple(merge.split() ) for merge in merges]
_UpperCAmelCase = dict(zip(lowerCamelCase , range(len(lowerCamelCase ) ) ) )
_UpperCAmelCase = {}
@property
def lowerCamelCase ( self : str ) -> int:
"""simple docstring"""
return len(self.encoder )
def lowerCamelCase ( self : List[Any] ) -> Dict:
"""simple docstring"""
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCamelCase ( self : Optional[Any] , lowerCamelCase : str ) -> str:
"""simple docstring"""
if token in self.cache:
return self.cache[token]
_UpperCAmelCase = re.sub("""([.,!?()])""" , r""" \1""" , lowerCamelCase )
_UpperCAmelCase = re.sub("""(')""" , r""" \1 """ , lowerCamelCase )
_UpperCAmelCase = re.sub(r"""\s{2,}""" , """ """ , lowerCamelCase )
if "\n" in token:
_UpperCAmelCase = token.replace("""\n""" , """ __newln__""" )
_UpperCAmelCase = token.split(""" """ )
_UpperCAmelCase = []
for token in tokens:
if not len(lowerCamelCase ):
continue
_UpperCAmelCase = token.lower()
_UpperCAmelCase = tuple(lowerCamelCase )
_UpperCAmelCase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_UpperCAmelCase = get_pairs(lowerCamelCase )
if not pairs:
words.append(lowerCamelCase )
continue
while True:
_UpperCAmelCase = min(lowerCamelCase , key=lambda lowerCamelCase : self.bpe_ranks.get(lowerCamelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCAmelCase , _UpperCAmelCase = bigram
_UpperCAmelCase = []
_UpperCAmelCase = 0
while i < len(lowerCamelCase ):
try:
_UpperCAmelCase = word.index(lowerCamelCase , lowerCamelCase )
new_word.extend(word[i:j] )
_UpperCAmelCase = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCAmelCase = tuple(lowerCamelCase )
_UpperCAmelCase = new_word
if len(lowerCamelCase ) == 1:
break
else:
_UpperCAmelCase = get_pairs(lowerCamelCase )
_UpperCAmelCase = """@@ """.join(lowerCamelCase )
_UpperCAmelCase = word[:-4]
_UpperCAmelCase = word
words.append(lowerCamelCase )
return " ".join(lowerCamelCase )
def lowerCamelCase ( self : Any , lowerCamelCase : str ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = re.findall(r"""\S+\n?""" , lowerCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(lowerCamelCase ).split(""" """ ) ) )
return split_tokens
def lowerCamelCase ( self : Tuple , lowerCamelCase : str ) -> int:
"""simple docstring"""
_UpperCAmelCase = token.lower()
return self.encoder.get(lowerCamelCase , self.encoder.get(self.unk_token ) )
def lowerCamelCase ( self : List[str] , lowerCamelCase : int ) -> str:
"""simple docstring"""
return self.decoder.get(lowerCamelCase , self.unk_token )
def lowerCamelCase ( self : Dict , lowerCamelCase : List[str] ) -> str:
"""simple docstring"""
_UpperCAmelCase = """ """.join(lowerCamelCase ).replace("""@@ """ , """""" ).strip()
return out_string
def lowerCamelCase ( self : List[Any] , lowerCamelCase : str , lowerCamelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase = os.path.join(
lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase = os.path.join(
lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase , ensure_ascii=lowerCamelCase ) + """\n""" )
_UpperCAmelCase = 0
with open(lowerCamelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
_UpperCAmelCase = token_index
writer.write(""" """.join(lowerCamelCase ) + """\n""" )
index += 1
return vocab_file, merge_file | 108 | 1 |
def lowerCAmelCase__ ( lowerCamelCase_ : int ,lowerCamelCase_ : int):
'''simple docstring'''
while second != 0:
lowerCAmelCase__ : Optional[int] = first & second
first ^= second
lowerCAmelCase__ : Optional[int] = c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
__snake_case : List[str] =int(input('Enter the first number: ').strip())
__snake_case : int =int(input('Enter the second number: ').strip())
print(f"""{add(first, second) = }""")
| 90 |
from bisect import bisect
from itertools import accumulate
def lowerCAmelCase__ ( lowerCamelCase_ : List[str] ,lowerCamelCase_ : Optional[Any] ,lowerCamelCase_ : List[str] ,lowerCamelCase_ : Optional[int]):
'''simple docstring'''
lowerCAmelCase__ : Any = sorted(zip(lowerCamelCase_ ,lowerCamelCase_) ,key=lambda lowerCamelCase_: x[0] / x[1] ,reverse=lowerCamelCase_)
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = [i[0] for i in r], [i[1] for i in r]
lowerCAmelCase__ : Tuple = list(accumulate(lowerCamelCase_))
lowerCAmelCase__ : str = bisect(lowerCamelCase_ ,lowerCamelCase_)
return (
0
if k == 0
else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k])
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 90 | 1 |
"""simple docstring"""
def _snake_case ( snake_case__ : list ):
A = len(snake_case__ )
for i in range(1 , snake_case__ ):
A = collection[i]
A = 0
A = i - 1
while low <= high:
A = (low + high) // 2
if val < collection[mid]:
A = mid - 1
else:
A = mid + 1
for j in range(snake_case__ , snake_case__ , -1 ):
A = collection[j - 1]
A = val
return collection
if __name__ == "__main__":
_lowercase = input('''Enter numbers separated by a comma:\n''').strip()
_lowercase = [int(item) for item in user_input.split(''',''')]
print(binary_insertion_sort(unsorted)) | 91 |
"""simple docstring"""
import copy
import re
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: str = '''hp'''
_lowerCamelCase: List[Any] = {}
_lowerCamelCase: List[Any] = None
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : List[str] ,A_ : Optional[Any] ) -> Tuple:
A = prefix
A = defaults
cls.build_naming_info()
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : Any ,A_ : List[Any] ) -> int:
if len(A_ ) == 0:
return ""
A = None
if any(char.isdigit() for char in word ):
raise Exception(F'Parameters should not contain numbers: \'{word}\' contains a number' )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 ,len(A_ ) + 1 ):
A = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
A = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(A_ : Optional[Any] ):
A = ''
while integer != 0:
A = chr(ord('A' ) + integer % 10 ) + s
integer //= 10
return s
A = 0
while True:
A = word + '#' + int_to_alphabetic(A_ )
if sword in info["reverse_short_word"]:
continue
else:
A = sword
break
A = short_word
A = word
return short_word
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]:
A = param_name.split('_' )
A = [TrialShortNamer.shortname_for_word(A_ ,A_ ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
A = ['', '_']
for separator in separators:
A = separator.join(A_ )
if shortname not in info["reverse_short_param"]:
A = shortname
A = param_name
return shortname
return param_name
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : List[Any] ,A_ : Any ) -> Tuple:
A = TrialShortNamer.shortname_for_key(A_ ,A_ )
A = short_name
A = param_name
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Dict ) -> List[Any]:
if cls.NAMING_INFO is not None:
return
A = {
'short_word': {},
'reverse_short_word': {},
'short_param': {},
'reverse_short_param': {},
}
A = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(A_ ,A_ )
A = info
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[Any] ,A_ : Union[str, Any] ) -> Union[str, Any]:
cls.build_naming_info()
assert cls.PREFIX is not None
A = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(F'You should provide a default value for the param name {k} with value {v}' )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
A = cls.NAMING_INFO['short_param'][k]
if isinstance(A_ ,A_ ):
A = 1 if v else 0
A = '' if isinstance(A_ ,(int, float) ) else '-'
A = F'{key}{sep}{v}'
name.append(A_ )
return "_".join(A_ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : List[str] ,A_ : Any ) -> int:
A = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
A = []
else:
A = repr.split('_' )
A = {}
for value in values:
if "-" in value:
A , A = value.split('-' )
else:
A = re.sub('[0-9.]' ,'' ,A_ )
A = float(re.sub('[^0-9.]' ,'' ,A_ ) )
A = cls.NAMING_INFO['reverse_short_param'][p_k]
A = p_v
for k in cls.DEFAULTS:
if k not in parameters:
A = cls.DEFAULTS[k]
return parameters | 91 | 1 |
'''simple docstring'''
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def _lowerCAmelCase ( _lowerCAmelCase = "" )-> dict[str, float]:
__UpperCAmelCase = url or 'https://www.imdb.com/chart/top/?ref_=nv_mv_250'
__UpperCAmelCase = BeautifulSoup(requests.get(_lowerCAmelCase ).text , 'html.parser' )
__UpperCAmelCase = soup.find_all('td' , attrs='titleColumn' )
__UpperCAmelCase = soup.find_all('td' , class_='ratingColumn imdbRating' )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(_lowerCAmelCase , _lowerCAmelCase )
}
def _lowerCAmelCase ( _lowerCAmelCase = "IMDb_Top_250_Movies.csv" )-> None:
__UpperCAmelCase = get_imdb_top_aaa_movies()
with open(_lowerCAmelCase , 'w' , newline='' ) as out_file:
__UpperCAmelCase = csv.writer(_lowerCAmelCase )
writer.writerow(['Movie title', 'IMDb rating'] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 617 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
_A: str = """Create a default config file for Accelerate with only a few flags set."""
def _lowerCAmelCase ( _lowerCAmelCase="no" , _lowerCAmelCase = default_json_config_file , _lowerCAmelCase = False )-> List[Any]:
__UpperCAmelCase = Path(_lowerCAmelCase )
path.parent.mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
if path.exists():
print(
F'Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.' )
return False
__UpperCAmelCase = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}' )
__UpperCAmelCase = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
__UpperCAmelCase = torch.cuda.device_count()
__UpperCAmelCase = num_gpus
__UpperCAmelCase = False
if num_gpus > 1:
__UpperCAmelCase = 'MULTI_GPU'
else:
__UpperCAmelCase = 'NO'
elif is_xpu_available() and use_xpu:
__UpperCAmelCase = torch.xpu.device_count()
__UpperCAmelCase = num_xpus
__UpperCAmelCase = False
if num_xpus > 1:
__UpperCAmelCase = 'MULTI_XPU'
else:
__UpperCAmelCase = 'NO'
elif is_npu_available():
__UpperCAmelCase = torch.npu.device_count()
__UpperCAmelCase = num_npus
__UpperCAmelCase = False
if num_npus > 1:
__UpperCAmelCase = 'MULTI_NPU'
else:
__UpperCAmelCase = 'NO'
else:
__UpperCAmelCase = 0
__UpperCAmelCase = True
__UpperCAmelCase = 1
__UpperCAmelCase = 'NO'
__UpperCAmelCase = ClusterConfig(**_lowerCAmelCase )
config.to_json_file(_lowerCAmelCase )
return path
def _lowerCAmelCase ( _lowerCAmelCase , _lowerCAmelCase )-> List[str]:
__UpperCAmelCase = parser.add_parser('default' , parents=_lowerCAmelCase , help=_lowerCAmelCase , formatter_class=_lowerCAmelCase )
parser.add_argument(
'--config_file' , default=_lowerCAmelCase , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , dest='save_location' , )
parser.add_argument(
'--mixed_precision' , choices=['no', 'fp16', 'bf16'] , type=_lowerCAmelCase , help='Whether or not to use mixed precision training. '
'Choose between FP16 and BF16 (bfloat16) training. '
'BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.' , default='no' , )
parser.set_defaults(func=_lowerCAmelCase )
return parser
def _lowerCAmelCase ( _lowerCAmelCase )-> Union[str, Any]:
__UpperCAmelCase = write_basic_config(args.mixed_precision , args.save_location )
if config_file:
print(F'accelerate configuration saved at {config_file}' )
| 617 | 1 |
import json
from typing import Iterator, List, Union
from tokenizers import AddedToken, Regex, Tokenizer, decoders, normalizers, pre_tokenizers, trainers
from tokenizers.implementations.base_tokenizer import BaseTokenizer
from tokenizers.models import Unigram
from tokenizers.processors import TemplateProcessing
class __lowerCAmelCase ( UpperCAmelCase_ ):
"""simple docstring"""
def __init__( self : Tuple , _snake_case : str = "▁" , _snake_case : bool = True , _snake_case : Union[str, AddedToken] = "<unk>" , _snake_case : Union[str, AddedToken] = "</s>" , _snake_case : Union[str, AddedToken] = "<pad>" , ):
"""simple docstring"""
A__ = {
'pad': {'id': 0, 'token': pad_token},
'eos': {'id': 1, 'token': eos_token},
'unk': {'id': 2, 'token': unk_token},
}
A__ = [None] * len(self.special_tokens )
for token_dict in self.special_tokens.values():
A__ = token_dict['token']
A__ = Tokenizer(Unigram() )
A__ = normalizers.Sequence(
[
normalizers.Nmt(),
normalizers.NFKC(),
normalizers.Replace(Regex(' {2,}' ) , ' ' ),
normalizers.Lowercase(),
] )
A__ = pre_tokenizers.Sequence(
[
pre_tokenizers.Metaspace(replacement=_snake_case , add_prefix_space=_snake_case ),
pre_tokenizers.Digits(individual_digits=_snake_case ),
pre_tokenizers.Punctuation(),
] )
A__ = decoders.Metaspace(replacement=_snake_case , add_prefix_space=_snake_case )
A__ = TemplateProcessing(
single=F'''$A {self.special_tokens["eos"]["token"]}''' , special_tokens=[(self.special_tokens['eos']['token'], self.special_tokens['eos']['id'])] , )
A__ = {
'model': 'SentencePieceUnigram',
'replacement': replacement,
'add_prefix_space': add_prefix_space,
}
super().__init__(_snake_case , _snake_case )
def _a ( self : int , _snake_case : Union[str, List[str]] , _snake_case : int = 80_00 , _snake_case : bool = True , ):
"""simple docstring"""
A__ = trainers.UnigramTrainer(
vocab_size=_snake_case , special_tokens=self.special_tokens_list , show_progress=_snake_case , )
if isinstance(_snake_case , _snake_case ):
A__ = [files]
self._tokenizer.train(_snake_case , trainer=_snake_case )
self.add_unk_id()
def _a ( self : Any , _snake_case : Union[Iterator[str], Iterator[Iterator[str]]] , _snake_case : int = 80_00 , _snake_case : bool = True , ):
"""simple docstring"""
A__ = trainers.UnigramTrainer(
vocab_size=_snake_case , special_tokens=self.special_tokens_list , show_progress=_snake_case , )
self._tokenizer.train_from_iterator(_snake_case , trainer=_snake_case )
self.add_unk_id()
def _a ( self : Union[str, Any] ):
"""simple docstring"""
A__ = json.loads(self._tokenizer.to_str() )
A__ = self.special_tokens['unk']['id']
A__ = Tokenizer.from_str(json.dumps(_snake_case ) )
| 9 |
import math
import flax.linen as nn
import jax.numpy as jnp
def lowerCamelCase__ ( lowercase , lowercase , lowercase = 1 , lowercase = 1 , lowercase = 1.0E4 , lowercase = False , lowercase = 1.0 , ):
"""simple docstring"""
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F'''Embedding dimension {embedding_dim} should be even'''
SCREAMING_SNAKE_CASE : Union[str, Any] = float(embedding_dim // 2 )
SCREAMING_SNAKE_CASE : Dict = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
SCREAMING_SNAKE_CASE : Optional[Any] = min_timescale * jnp.exp(jnp.arange(lowercase , dtype=jnp.floataa ) * -log_timescale_increment )
SCREAMING_SNAKE_CASE : Optional[int] = jnp.expand_dims(lowercase , 1 ) * jnp.expand_dims(lowercase , 0 )
# scale embeddings
SCREAMING_SNAKE_CASE : Optional[int] = scale * emb
if flip_sin_to_cos:
SCREAMING_SNAKE_CASE : List[Any] = jnp.concatenate([jnp.cos(lowercase ), jnp.sin(lowercase )] , axis=1 )
else:
SCREAMING_SNAKE_CASE : Union[str, Any] = jnp.concatenate([jnp.sin(lowercase ), jnp.cos(lowercase )] , axis=1 )
SCREAMING_SNAKE_CASE : Tuple = jnp.reshape(lowercase , [jnp.shape(lowercase )[0], embedding_dim] )
return signal
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
UpperCamelCase_ : int = 3_2
UpperCamelCase_ : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self : Tuple , UpperCAmelCase_ : str ):
SCREAMING_SNAKE_CASE : Any = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_1" )(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Union[str, Any] = nn.silu(UpperCAmelCase_ )
SCREAMING_SNAKE_CASE : Any = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="linear_2" )(UpperCAmelCase_ )
return temb
class SCREAMING_SNAKE_CASE ( nn.Module ):
'''simple docstring'''
UpperCamelCase_ : int = 3_2
UpperCamelCase_ : bool = False
UpperCamelCase_ : float = 1
@nn.compact
def __call__( self : Optional[int] , UpperCAmelCase_ : int ):
return get_sinusoidal_embeddings(
UpperCAmelCase_ , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
| 62 | 0 |
def SCREAMING_SNAKE_CASE__ ( lowerCAmelCase_ : int ,lowerCAmelCase_ : int ) -> int:
"""simple docstring"""
while second != 0:
SCREAMING_SNAKE_CASE_ : Dict =first & second
first ^= second
SCREAMING_SNAKE_CASE_ : Union[str, Any] =c << 1
return first
if __name__ == "__main__":
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE = int(input('Enter the first number: ').strip())
__SCREAMING_SNAKE_CASE = int(input('Enter the second number: ').strip())
print(f"""{add(first, second) = }""")
| 715 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=__A )
class lowerCAmelCase_ ( __A ):
'''simple docstring'''
_lowercase = field(default='text-classification' , metadata={'include_in_asdict_even_if_is_default': True} )
_lowercase = Features({'text': Value('string' )} )
_lowercase = Features({'labels': ClassLabel} )
_lowercase = "text"
_lowercase = "labels"
def __lowerCamelCase ( self , __UpperCAmelCase ):
if self.label_column not in features:
raise ValueError(F"""Column {self.label_column} is not present in features.""" )
if not isinstance(features[self.label_column] , __UpperCAmelCase ):
raise ValueError(F"""Column {self.label_column} is not a ClassLabel.""" )
SCREAMING_SNAKE_CASE_ : Union[str, Any] =copy.deepcopy(self )
SCREAMING_SNAKE_CASE_ : List[str] =self.label_schema.copy()
SCREAMING_SNAKE_CASE_ : Tuple =features[self.label_column]
SCREAMING_SNAKE_CASE_ : str =label_schema
return task_template
@property
def __lowerCamelCase ( self ):
return {
self.text_column: "text",
self.label_column: "labels",
}
| 153 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
UniSpeechSatConfig,
UniSpeechSatForAudioFrameClassification,
UniSpeechSatForSequenceClassification,
UniSpeechSatForXVector,
WavaVecaFeatureExtractor,
logging,
)
logging.set_verbosity_info()
snake_case_ : Any = logging.get_logger(__name__)
def lowercase_ ( _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : int ):
'''simple docstring'''
UpperCAmelCase : List[Any] = UniSpeechSatForSequenceClassification.from_pretrained(_lowercase , config=_lowercase )
UpperCAmelCase : List[str] = downstream_dict["projector.weight"]
UpperCAmelCase : str = downstream_dict["projector.bias"]
UpperCAmelCase : Optional[int] = downstream_dict["model.post_net.linear.weight"]
UpperCAmelCase : Union[str, Any] = downstream_dict["model.post_net.linear.bias"]
return model
def lowercase_ ( _lowercase : Union[str, Any] , _lowercase : int , _lowercase : Optional[int] ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = UniSpeechSatForAudioFrameClassification.from_pretrained(_lowercase , config=_lowercase )
UpperCAmelCase : Any = downstream_dict["model.linear.weight"]
UpperCAmelCase : Optional[Any] = downstream_dict["model.linear.bias"]
return model
def lowercase_ ( _lowercase : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase : Any = UniSpeechSatForXVector.from_pretrained(_lowercase , config=_lowercase )
UpperCAmelCase : Tuple = downstream_dict["connector.weight"]
UpperCAmelCase : List[Any] = downstream_dict["connector.bias"]
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
UpperCAmelCase : Union[str, Any] = downstream_dict[
F"""model.framelevel_feature_extractor.module.{i}.kernel.weight"""
]
UpperCAmelCase : Optional[Any] = downstream_dict[F"""model.framelevel_feature_extractor.module.{i}.kernel.bias"""]
UpperCAmelCase : Optional[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
UpperCAmelCase : List[Any] = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
UpperCAmelCase : Any = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
UpperCAmelCase : int = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
UpperCAmelCase : Any = downstream_dict["objective.W"]
return model
@torch.no_grad()
def lowercase_ ( _lowercase : Dict , _lowercase : List[Any] , _lowercase : int , _lowercase : List[Any] ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = torch.load(_lowercase , map_location="cpu" )
UpperCAmelCase : Any = checkpoint["Downstream"]
UpperCAmelCase : str = UniSpeechSatConfig.from_pretrained(_lowercase )
UpperCAmelCase : Dict = WavaVecaFeatureExtractor.from_pretrained(
_lowercase , return_attention_mask=_lowercase , do_normalize=_lowercase )
UpperCAmelCase : List[Any] = hf_config.architectures[0]
if arch.endswith("ForSequenceClassification" ):
UpperCAmelCase : Optional[int] = convert_classification(_lowercase , _lowercase , _lowercase )
elif arch.endswith("ForAudioFrameClassification" ):
UpperCAmelCase : str = convert_diarization(_lowercase , _lowercase , _lowercase )
elif arch.endswith("ForXVector" ):
UpperCAmelCase : Union[str, Any] = convert_xvector(_lowercase , _lowercase , _lowercase )
else:
raise NotImplementedError(F"""S3PRL weights conversion is not supported for {arch}""" )
if hf_config.use_weighted_layer_sum:
UpperCAmelCase : List[Any] = checkpoint["Featurizer"]["weights"]
hf_feature_extractor.save_pretrained(_lowercase )
hf_model.save_pretrained(_lowercase )
if __name__ == "__main__":
snake_case_ : List[str] = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
snake_case_ : List[str] = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 595 |
"""simple docstring"""
import os
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_doctest_list.py
snake_case_ : Any = """."""
if __name__ == "__main__":
snake_case_ : List[str] = os.path.join(REPO_PATH, """utils/documentation_tests.txt""")
snake_case_ : Any = []
snake_case_ : Tuple = []
with open(doctest_file_path) as fp:
for line in fp:
snake_case_ : List[Any] = line.strip()
snake_case_ : List[Any] = os.path.join(REPO_PATH, line)
if not (os.path.isfile(path) or os.path.isdir(path)):
non_existent_paths.append(line)
all_paths.append(path)
if len(non_existent_paths) > 0:
snake_case_ : Union[str, Any] = """\n""".join(non_existent_paths)
raise ValueError(f'''`utils/documentation_tests.txt` contains non-existent paths:\n{non_existent_paths}''')
if all_paths != sorted(all_paths):
raise ValueError("""Files in `utils/documentation_tests.txt` are not in alphabetical order.""")
| 595 | 1 |
'''simple docstring'''
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class lowercase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Any , __lowerCamelCase : str , __lowerCamelCase : int=7 , __lowerCamelCase : Optional[Any]=3 , __lowerCamelCase : List[Any]=30 , __lowerCamelCase : int=400 , __lowerCamelCase : List[Any]=True , __lowerCamelCase : Union[str, Any]=None , __lowerCamelCase : str=True , __lowerCamelCase : Optional[int]=1 / 255 , __lowerCamelCase : str=True , __lowerCamelCase : str=[0.5, 0.5, 0.5] , __lowerCamelCase : Dict=[0.5, 0.5, 0.5] , __lowerCamelCase : Tuple=True , ) -> List[str]:
'''simple docstring'''
lowerCamelCase__ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
lowerCamelCase__ = parent
lowerCamelCase__ = batch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = min_resolution
lowerCamelCase__ = max_resolution
lowerCamelCase__ = do_resize
lowerCamelCase__ = size
lowerCamelCase__ = do_rescale
lowerCamelCase__ = rescale_factor
lowerCamelCase__ = do_normalize
lowerCamelCase__ = image_mean
lowerCamelCase__ = image_std
lowerCamelCase__ = do_pad
def a__ ( self : str ) -> str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def a__ ( self : Dict , __lowerCamelCase : Dict , __lowerCamelCase : Dict=False ) -> Tuple:
'''simple docstring'''
if not batched:
lowerCamelCase__ = image_inputs[0]
if isinstance(__lowerCamelCase , Image.Image ):
lowerCamelCase__ , lowerCamelCase__ = image.size
else:
lowerCamelCase__ , lowerCamelCase__ = image.shape[1], image.shape[2]
if w < h:
lowerCamelCase__ = int(self.size["shortest_edge"] * h / w )
lowerCamelCase__ = self.size["shortest_edge"]
elif w > h:
lowerCamelCase__ = self.size["shortest_edge"]
lowerCamelCase__ = int(self.size["shortest_edge"] * w / h )
else:
lowerCamelCase__ = self.size["shortest_edge"]
lowerCamelCase__ = self.size["shortest_edge"]
else:
lowerCamelCase__ = []
for image in image_inputs:
lowerCamelCase__ , lowerCamelCase__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
lowerCamelCase__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[0] )[0]
lowerCamelCase__ = max(__lowerCamelCase , key=lambda __lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( _lowerCamelCase , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase__ = DetrImageProcessor if is_vision_available() else None
def a__ ( self : Optional[int] ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase__ = DetrImageProcessingTester(self )
@property
def a__ ( self : Optional[Any] ) -> List[str]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self : Optional[Any] ) -> List[Any]:
'''simple docstring'''
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(__lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(__lowerCamelCase , "rescale_factor" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(__lowerCamelCase , "size" ) )
self.assertTrue(hasattr(__lowerCamelCase , "do_pad" ) )
def a__ ( self : str ) -> int:
'''simple docstring'''
lowerCamelCase__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
lowerCamelCase__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=__lowerCamelCase )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , __lowerCamelCase )
def a__ ( self : Any ) -> int:
'''simple docstring'''
pass
def a__ ( self : int ) -> Any:
'''simple docstring'''
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , Image.Image )
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase__ , lowerCamelCase__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ , lowerCamelCase__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
lowerCamelCase__ = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self : Tuple ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , numpify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , np.ndarray )
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase__ , lowerCamelCase__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
lowerCamelCase__ , lowerCamelCase__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self : List[Any] ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCamelCase__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=__lowerCamelCase , torchify=__lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(__lowerCamelCase , torch.Tensor )
# Test not batched input
lowerCamelCase__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
lowerCamelCase__ , lowerCamelCase__ = self.image_processor_tester.get_expected_values(__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
lowerCamelCase__ = image_processing(__lowerCamelCase , return_tensors="pt" ).pixel_values
lowerCamelCase__ , lowerCamelCase__ = self.image_processor_tester.get_expected_values(__lowerCamelCase , batched=__lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self : Union[str, Any] ) -> Any:
'''simple docstring'''
lowerCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
lowerCamelCase__ = json.loads(f.read() )
lowerCamelCase__ = {"image_id": 39769, "annotations": target}
# encode them
lowerCamelCase__ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
lowerCamelCase__ = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
lowerCamelCase__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
lowerCamelCase__ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1E-4 ) )
# verify area
lowerCamelCase__ = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
lowerCamelCase__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
lowerCamelCase__ = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1E-3 ) )
# verify image_id
lowerCamelCase__ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
lowerCamelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
lowerCamelCase__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify orig_size
lowerCamelCase__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
lowerCamelCase__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
@slow
def a__ ( self : str ) -> List[str]:
'''simple docstring'''
lowerCamelCase__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
lowerCamelCase__ = json.loads(f.read() )
lowerCamelCase__ = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
lowerCamelCase__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
lowerCamelCase__ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
lowerCamelCase__ = image_processing(images=__lowerCamelCase , annotations=__lowerCamelCase , masks_path=__lowerCamelCase , return_tensors="pt" )
# verify pixel values
lowerCamelCase__ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , __lowerCamelCase )
lowerCamelCase__ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __lowerCamelCase , atol=1E-4 ) )
# verify area
lowerCamelCase__ = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __lowerCamelCase ) )
# verify boxes
lowerCamelCase__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __lowerCamelCase )
lowerCamelCase__ = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __lowerCamelCase , atol=1E-3 ) )
# verify image_id
lowerCamelCase__ = torch.tensor([39769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __lowerCamelCase ) )
# verify is_crowd
lowerCamelCase__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __lowerCamelCase ) )
# verify class_labels
lowerCamelCase__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __lowerCamelCase ) )
# verify masks
lowerCamelCase__ = 822873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __lowerCamelCase )
# verify orig_size
lowerCamelCase__ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __lowerCamelCase ) )
# verify size
lowerCamelCase__ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __lowerCamelCase ) )
| 187 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__A : Optional[Any] = logging.get_logger(__name__)
__A : int = {
"""google/vit-base-patch16-224""": """https://huggingface.co/vit-base-patch16-224/resolve/main/config.json""",
# See all ViT models at https://huggingface.co/models?filter=vit
}
class lowercase ( _lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = "vit"
def __init__( self : Optional[int] , __lowerCamelCase : Tuple=768 , __lowerCamelCase : Tuple=12 , __lowerCamelCase : Tuple=12 , __lowerCamelCase : int=3072 , __lowerCamelCase : Optional[Any]="gelu" , __lowerCamelCase : Optional[int]=0.0 , __lowerCamelCase : Tuple=0.0 , __lowerCamelCase : Optional[Any]=0.0_2 , __lowerCamelCase : int=1E-12 , __lowerCamelCase : Tuple=224 , __lowerCamelCase : int=16 , __lowerCamelCase : Optional[int]=3 , __lowerCamelCase : List[str]=True , __lowerCamelCase : str=16 , **__lowerCamelCase : Optional[int] , ) -> str:
'''simple docstring'''
super().__init__(**__lowerCamelCase )
lowerCamelCase__ = hidden_size
lowerCamelCase__ = num_hidden_layers
lowerCamelCase__ = num_attention_heads
lowerCamelCase__ = intermediate_size
lowerCamelCase__ = hidden_act
lowerCamelCase__ = hidden_dropout_prob
lowerCamelCase__ = attention_probs_dropout_prob
lowerCamelCase__ = initializer_range
lowerCamelCase__ = layer_norm_eps
lowerCamelCase__ = image_size
lowerCamelCase__ = patch_size
lowerCamelCase__ = num_channels
lowerCamelCase__ = qkv_bias
lowerCamelCase__ = encoder_stride
class lowercase ( _lowerCamelCase ):
'''simple docstring'''
lowerCAmelCase__ = version.parse("1.11" )
@property
def a__ ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def a__ ( self : Optional[int] ) -> float:
'''simple docstring'''
return 1E-4
| 187 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'facebook/xglm-564M': 'https://huggingface.co/facebook/xglm-564M/resolve/main/config.json',
# See all XGLM models at https://huggingface.co/models?filter=xglm
}
class lowerCamelCase__ ( A__ ):
__lowerCamelCase = """xglm"""
__lowerCamelCase = ["""past_key_values"""]
__lowerCamelCase = {
"""num_attention_heads""": """attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : str , __a : List[Any]=256008 , __a : Tuple=2048 , __a : Any=1024 , __a : Optional[int]=4096 , __a : Any=24 , __a : List[Any]=16 , __a : Dict="gelu" , __a : int=0.1 , __a : Union[str, Any]=0.1 , __a : Tuple=0.0 , __a : str=0.0 , __a : str=0.02 , __a : str=True , __a : Dict=True , __a : Union[str, Any]=2 , __a : Union[str, Any]=1 , __a : Union[str, Any]=0 , __a : List[str]=2 , **__a : Union[str, Any] , ):
'''simple docstring'''
lowerCamelCase__: Any = vocab_size
lowerCamelCase__: Union[str, Any] = max_position_embeddings
lowerCamelCase__: List[Any] = d_model
lowerCamelCase__: Dict = ffn_dim
lowerCamelCase__: int = num_layers
lowerCamelCase__: List[str] = attention_heads
lowerCamelCase__: List[Any] = activation_function
lowerCamelCase__: Tuple = dropout
lowerCamelCase__: Union[str, Any] = attention_dropout
lowerCamelCase__: Union[str, Any] = activation_dropout
lowerCamelCase__: Tuple = layerdrop
lowerCamelCase__: Dict = init_std
lowerCamelCase__: Tuple = scale_embedding # scale factor will be sqrt(d_model) if True
lowerCamelCase__: Tuple = use_cache
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
| 306 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowerCamelCase__ ( A__ ):
def __init__( self : Tuple , *__a : Tuple , __a : Dict=None , __a : List[str]=None , **__a : Dict ):
'''simple docstring'''
super().__init__(*__a , **__a )
lowerCamelCase__: str = eval_examples
lowerCamelCase__: Optional[int] = post_process_function
def lowerCamelCase_ ( self : str , __a : Optional[Dataset] = None , __a : List[Any]=None , __a : Optional[List[str]] = None , __a : str = "eval" , **__a : Tuple , ):
'''simple docstring'''
lowerCamelCase__: Tuple = gen_kwargs.copy()
lowerCamelCase__: Union[str, Any] = (
gen_kwargs["""max_length"""] if gen_kwargs.get("""max_length""" ) is not None else self.args.generation_max_length
)
lowerCamelCase__: Tuple = (
gen_kwargs["""num_beams"""] if gen_kwargs.get("""num_beams""" ) is not None else self.args.generation_num_beams
)
lowerCamelCase__: Optional[Any] = gen_kwargs
lowerCamelCase__: List[Any] = self.eval_dataset if eval_dataset is None else eval_dataset
lowerCamelCase__: Union[str, Any] = self.get_eval_dataloader(__a )
lowerCamelCase__: Union[str, Any] = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase__: Optional[int] = self.compute_metrics
lowerCamelCase__: Union[str, Any] = None
lowerCamelCase__: Dict = time.time()
lowerCamelCase__: Optional[int] = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase__: Any = eval_loop(
__a , description="""Evaluation""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__a , metric_key_prefix=__a , )
finally:
lowerCamelCase__: Any = compute_metrics
lowerCamelCase__: int = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
__a , __a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
lowerCamelCase__: Tuple = self.post_process_function(__a , __a , __a )
lowerCamelCase__: List[Any] = self.compute_metrics(__a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
lowerCamelCase__: Dict = metrics.pop(__a )
metrics.update(output.metrics )
else:
lowerCamelCase__: int = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(__a )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
lowerCamelCase__: List[Any] = self.callback_handler.on_evaluate(self.args , self.state , self.control , __a )
return metrics
def lowerCamelCase_ ( self : str , __a : List[str] , __a : List[Any] , __a : Tuple=None , __a : str = "test" , **__a : Optional[int] ):
'''simple docstring'''
lowerCamelCase__: List[Any] = gen_kwargs.copy()
lowerCamelCase__: Optional[Any] = self.get_test_dataloader(__a )
# Temporarily disable metric computation, we will do it in the loop here.
lowerCamelCase__: Any = self.compute_metrics
lowerCamelCase__: Optional[int] = None
lowerCamelCase__: int = time.time()
lowerCamelCase__: Tuple = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
lowerCamelCase__: List[str] = eval_loop(
__a , description="""Prediction""" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=__a , metric_key_prefix=__a , )
finally:
lowerCamelCase__: Any = compute_metrics
lowerCamelCase__: Optional[int] = self.args.eval_batch_size * self.args.world_size
if f"""{metric_key_prefix}_jit_compilation_time""" in output.metrics:
start_time += output.metrics[f"""{metric_key_prefix}_jit_compilation_time"""]
output.metrics.update(
speed_metrics(
__a , __a , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
lowerCamelCase__: str = self.post_process_function(__a , __a , __a , """predict""" )
lowerCamelCase__: str = self.compute_metrics(__a )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f"""{metric_key_prefix}_""" ):
lowerCamelCase__: Dict = metrics.pop(__a )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=__a )
| 306 | 1 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import NezhaConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
)
from transformers.models.nezha.modeling_nezha import NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
'''simple docstring'''
def __init__( self: List[Any] , snake_case: Optional[Any] , snake_case: str=13 , snake_case: str=7 , snake_case: Optional[Any]=True , snake_case: int=True , snake_case: int=True , snake_case: Dict=True , snake_case: Union[str, Any]=99 , snake_case: Union[str, Any]=32 , snake_case: Optional[Any]=5 , snake_case: int=4 , snake_case: Dict=37 , snake_case: str="gelu" , snake_case: List[Any]=0.1 , snake_case: Any=0.1 , snake_case: Tuple=128 , snake_case: Union[str, Any]=32 , snake_case: Tuple=16 , snake_case: str=2 , snake_case: List[Any]=0.0_2 , snake_case: Tuple=3 , snake_case: List[Any]=4 , snake_case: Optional[int]=None , ) -> Union[str, Any]:
snake_case_ :List[Any] = parent
snake_case_ :Tuple = batch_size
snake_case_ :int = seq_length
snake_case_ :Tuple = is_training
snake_case_ :Optional[Any] = use_input_mask
snake_case_ :Tuple = use_token_type_ids
snake_case_ :List[str] = use_labels
snake_case_ :str = vocab_size
snake_case_ :Any = hidden_size
snake_case_ :Optional[Any] = num_hidden_layers
snake_case_ :Optional[int] = num_attention_heads
snake_case_ :List[Any] = intermediate_size
snake_case_ :Tuple = hidden_act
snake_case_ :Tuple = hidden_dropout_prob
snake_case_ :Any = attention_probs_dropout_prob
snake_case_ :List[str] = max_position_embeddings
snake_case_ :Any = type_vocab_size
snake_case_ :int = type_sequence_label_size
snake_case_ :int = initializer_range
snake_case_ :str = num_labels
snake_case_ :List[Any] = num_choices
snake_case_ :Union[str, Any] = scope
def lowerCAmelCase_ ( self: Dict ) -> List[Any]:
snake_case_ :str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ :Any = None
if self.use_input_mask:
snake_case_ :Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case_ :Optional[int] = None
if self.use_token_type_ids:
snake_case_ :Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ :List[str] = None
snake_case_ :Union[str, Any] = None
snake_case_ :List[Any] = None
if self.use_labels:
snake_case_ :Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ :List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ :Dict = ids_tensor([self.batch_size] , self.num_choices )
snake_case_ :int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self: Union[str, Any] ) -> Optional[Any]:
return NezhaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=snake_case , initializer_range=self.initializer_range , )
def lowerCAmelCase_ ( self: Optional[Any] ) -> str:
(
(
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
),
) :Union[str, Any] = self.prepare_config_and_inputs()
snake_case_ :Optional[int] = True
snake_case_ :Tuple = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case_ :int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCAmelCase_ ( self: int , snake_case: List[str] , snake_case: Optional[int] , snake_case: List[Any] , snake_case: Dict , snake_case: Optional[Any] , snake_case: List[str] , snake_case: List[Any] ) -> Tuple:
snake_case_ :Tuple = NezhaModel(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Optional[int] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
snake_case_ :Union[str, Any] = model(snake_case , token_type_ids=snake_case )
snake_case_ :Tuple = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self: Union[str, Any] , snake_case: Tuple , snake_case: Tuple , snake_case: List[Any] , snake_case: Optional[Any] , snake_case: Any , snake_case: Dict , snake_case: Tuple , snake_case: Tuple , snake_case: Tuple , ) -> Any:
snake_case_ :Optional[Any] = True
snake_case_ :Optional[int] = NezhaModel(snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Dict = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , encoder_hidden_states=snake_case , encoder_attention_mask=snake_case , )
snake_case_ :Tuple = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , encoder_hidden_states=snake_case , )
snake_case_ :Optional[int] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase_ ( self: Optional[Any] , snake_case: Tuple , snake_case: Dict , snake_case: Any , snake_case: List[str] , snake_case: List[Any] , snake_case: List[str] , snake_case: str ) -> str:
snake_case_ :Optional[Any] = NezhaForMaskedLM(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :int = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self: int , snake_case: Tuple , snake_case: Optional[Any] , snake_case: Tuple , snake_case: Any , snake_case: Any , snake_case: Optional[int] , snake_case: List[Any] ) -> Optional[int]:
snake_case_ :List[str] = NezhaForNextSentencePrediction(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Any = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self: Any , snake_case: Union[str, Any] , snake_case: Dict , snake_case: Union[str, Any] , snake_case: Dict , snake_case: Optional[Any] , snake_case: Union[str, Any] , snake_case: str ) -> str:
snake_case_ :Optional[Any] = NezhaForPreTraining(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :str = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , next_sentence_label=snake_case , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def lowerCAmelCase_ ( self: str , snake_case: int , snake_case: Optional[Any] , snake_case: Optional[int] , snake_case: Dict , snake_case: Tuple , snake_case: int , snake_case: Optional[Any] ) -> Union[str, Any]:
snake_case_ :Optional[int] = NezhaForQuestionAnswering(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :str = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , start_positions=snake_case , end_positions=snake_case , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase_ ( self: Any , snake_case: int , snake_case: Union[str, Any] , snake_case: int , snake_case: List[Any] , snake_case: Any , snake_case: Optional[int] , snake_case: Optional[int] ) -> Optional[Any]:
snake_case_ :Any = self.num_labels
snake_case_ :Tuple = NezhaForSequenceClassification(snake_case )
model.to(snake_case )
model.eval()
snake_case_ :str = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCAmelCase_ ( self: Optional[int] , snake_case: List[Any] , snake_case: str , snake_case: List[str] , snake_case: int , snake_case: Optional[int] , snake_case: List[Any] , snake_case: Any ) -> Dict:
snake_case_ :Any = self.num_labels
snake_case_ :Tuple = NezhaForTokenClassification(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :Optional[int] = model(snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self: List[Any] , snake_case: Optional[int] , snake_case: List[str] , snake_case: Optional[int] , snake_case: int , snake_case: int , snake_case: Dict , snake_case: Tuple ) -> int:
snake_case_ :int = self.num_choices
snake_case_ :int = NezhaForMultipleChoice(config=snake_case )
model.to(snake_case )
model.eval()
snake_case_ :str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ :List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ :Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case_ :Optional[int] = model(
snake_case , attention_mask=snake_case , token_type_ids=snake_case , labels=snake_case , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCAmelCase_ ( self: List[str] ) -> List[str]:
snake_case_ :str = self.prepare_config_and_inputs()
(
(
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
),
) :str = config_and_inputs
snake_case_ :Union[str, Any] = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
_A : Optional[int] = (
(
NezhaModel,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
)
if is_torch_available()
else ()
)
_A : List[str] = (
{
"""feature-extraction""": NezhaModel,
"""fill-mask""": NezhaForMaskedLM,
"""question-answering""": NezhaForQuestionAnswering,
"""text-classification""": NezhaForSequenceClassification,
"""token-classification""": NezhaForTokenClassification,
"""zero-shot""": NezhaForSequenceClassification,
}
if is_torch_available()
else {}
)
_A : Union[str, Any] = True
def lowerCAmelCase_ ( self: int , snake_case: Union[str, Any] , snake_case: List[str] , snake_case: Optional[Any]=False ) -> Union[str, Any]:
snake_case_ :Tuple = super()._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
if return_labels:
if model_class in get_values(snake_case ):
snake_case_ :Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=snake_case )
snake_case_ :List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def lowerCAmelCase_ ( self: Dict ) -> List[Any]:
snake_case_ :Tuple = NezhaModelTester(self )
snake_case_ :int = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[int]:
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self: Optional[int] ) -> Optional[Any]:
snake_case_ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCAmelCase_ ( self: Dict ) -> Optional[int]:
snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*snake_case )
def lowerCAmelCase_ ( self: str ) -> int:
# This regression test was failing with PyTorch < 1.3
(
(
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
), (
snake_case_
),
) :int = self.model_tester.prepare_config_and_inputs_for_decoder()
snake_case_ :List[Any] = None
self.model_tester.create_and_check_model_as_decoder(
snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , )
def lowerCAmelCase_ ( self: Any ) -> List[str]:
snake_case_ :Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def lowerCAmelCase_ ( self: Optional[Any] ) -> List[Any]:
snake_case_ :int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case )
def lowerCAmelCase_ ( self: Any ) -> str:
snake_case_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_next_sequence_prediction(*snake_case )
def lowerCAmelCase_ ( self: Optional[Any] ) -> Tuple:
snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*snake_case )
def lowerCAmelCase_ ( self: str ) -> Tuple:
snake_case_ :Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def lowerCAmelCase_ ( self: Optional[Any] ) -> Optional[int]:
snake_case_ :Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def lowerCAmelCase_ ( self: Any ) -> Optional[int]:
snake_case_ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
@slow
def lowerCAmelCase_ ( self: Union[str, Any] ) -> List[str]:
for model_name in NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ :Any = NezhaModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
@slow
@require_torch_gpu
def lowerCAmelCase_ ( self: List[str] ) -> Optional[Any]:
snake_case_, snake_case_ :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# NezhaForMultipleChoice behaves incorrectly in JIT environments.
if model_class == NezhaForMultipleChoice:
return
snake_case_ :int = True
snake_case_ :Tuple = model_class(config=snake_case )
snake_case_ :List[Any] = self._prepare_for_class(snake_case , snake_case )
snake_case_ :List[Any] = torch.jit.trace(
snake_case , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(snake_case , os.path.join(snake_case , """bert.pt""" ) )
snake_case_ :Tuple = torch.jit.load(os.path.join(snake_case , """bert.pt""" ) , map_location=snake_case )
loaded(inputs_dict["""input_ids"""].to(snake_case ) , inputs_dict["""attention_mask"""].to(snake_case ) )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self: Tuple ) -> int:
snake_case_ :Dict = NezhaModel.from_pretrained("""sijunhe/nezha-cn-base""" )
snake_case_ :Dict = torch.tensor([[0, 1, 2, 3, 4, 5]] )
snake_case_ :Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case_ :int = model(snake_case , attention_mask=snake_case )[0]
snake_case_ :Union[str, Any] = torch.Size((1, 6, 768) )
self.assertEqual(output.shape , snake_case )
snake_case_ :Optional[int] = torch.tensor([[[0.0_6_8_5, 0.2_4_4_1, 0.1_1_0_2], [0.0_6_0_0, 0.1_9_0_6, 0.1_3_4_9], [0.0_2_2_1, 0.0_8_1_9, 0.0_5_8_6]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case , atol=1E-4 ) )
@slow
def lowerCAmelCase_ ( self: str ) -> Any:
snake_case_ :str = NezhaForMaskedLM.from_pretrained("""sijunhe/nezha-cn-base""" )
snake_case_ :Tuple = torch.tensor([[0, 1, 2, 3, 4, 5]] )
snake_case_ :List[str] = torch.tensor([[1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case_ :Tuple = model(snake_case , attention_mask=snake_case )[0]
snake_case_ :int = torch.Size((1, 6, 21_128) )
self.assertEqual(output.shape , snake_case )
snake_case_ :Tuple = torch.tensor(
[[-2.7_9_3_9, -1.7_9_0_2, -2.2_1_8_9], [-2.8_5_8_5, -1.8_9_0_8, -2.3_7_2_3], [-2.6_4_9_9, -1.7_7_5_0, -2.2_5_5_8]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , snake_case , atol=1E-4 ) )
| 310 |
"""simple docstring"""
import argparse
from typing import Dict
import tensorflow as tf
import torch
from tqdm import tqdm
from transformers import BigBirdPegasusConfig, BigBirdPegasusForConditionalGeneration
__a = [
# tf -> hf
("/", "."),
("layer_", "layers."),
("kernel", "weight"),
("beta", "bias"),
("gamma", "weight"),
("pegasus", "model"),
]
__a = [
(".output.dense", ".fc2"),
("intermediate.LayerNorm", "final_layer_norm"),
("intermediate.dense", "fc1"),
]
__a = (
INIT_COMMON
+ [
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.out_proj"),
("attention.self", "self_attn"),
("attention.encdec.LayerNorm", "encoder_attn_layer_norm"),
("attention.encdec_output.dense", "encoder_attn.out_proj"),
("attention.encdec", "encoder_attn"),
("key", "k_proj"),
("value", "v_proj"),
("query", "q_proj"),
("decoder.LayerNorm", "decoder.layernorm_embedding"),
]
+ END_COMMON
)
__a = (
INIT_COMMON
+ [
("embeddings.word_embeddings", "shared.weight"),
("embeddings.position_embeddings", "embed_positions.weight"),
("attention.self.LayerNorm", "self_attn_layer_norm"),
("attention.output.dense", "self_attn.output"),
("attention.self", "self_attn.self"),
("encoder.LayerNorm", "encoder.layernorm_embedding"),
]
+ END_COMMON
)
__a = [
"encdec/key/bias",
"encdec/query/bias",
"encdec/value/bias",
"self/key/bias",
"self/query/bias",
"self/value/bias",
"encdec_output/dense/bias",
"attention/output/dense/bias",
]
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
for tf_name, hf_name in patterns:
snake_case_ :Any = k.replace(_lowercase, _lowercase )
return k
def A_ ( _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :int = BigBirdPegasusConfig(**_lowercase )
snake_case_ :int = BigBirdPegasusForConditionalGeneration(_lowercase )
snake_case_ :List[str] = torch_model.state_dict()
snake_case_ :Dict = {}
# separating decoder weights
snake_case_ :Optional[Any] = {k: tf_weights[k] for k in tf_weights if k.startswith("""pegasus/decoder""" )}
snake_case_ :int = {k: tf_weights[k] for k in tf_weights if not k.startswith("""pegasus/decoder""" )}
for k, v in tqdm(decoder_weights.items(), """tf -> hf conversion""" ):
snake_case_ :List[str] = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
snake_case_ :Optional[int] = DECODER_PATTERNS
snake_case_ :int = rename_state_dict_key(_lowercase, _lowercase )
if new_k not in state_dict:
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
snake_case_ :Any = v.T
snake_case_ :Tuple = torch.from_numpy(_lowercase )
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
for k, v in tqdm(remaining_weights.items(), """tf -> hf conversion""" ):
snake_case_ :int = [k.endswith(_lowercase ) for ending in KEYS_TO_IGNORE]
if any(_lowercase ):
continue
snake_case_ :int = REMAINING_PATTERNS
snake_case_ :Optional[int] = rename_state_dict_key(_lowercase, _lowercase )
if new_k not in state_dict and k != "pegasus/embeddings/position_embeddings":
raise ValueError(f"""could not find new key {new_k} in state dict. (converted from {k})""" )
if any(True if i in k else False for i in ["""dense""", """query""", """key""", """value"""] ):
snake_case_ :Tuple = v.T
snake_case_ :str = torch.from_numpy(_lowercase )
if k != "pegasus/embeddings/position_embeddings":
assert v.shape == state_dict[new_k].shape, f"""{new_k}, {k}, {v.shape}, {state_dict[new_k].shape}"""
snake_case_ :Union[str, Any] = mapping["""model.embed_positions.weight"""]
snake_case_ :List[str] = mapping.pop("""model.embed_positions.weight""" )
snake_case_, snake_case_ :Optional[Any] = torch_model.load_state_dict(_lowercase, strict=_lowercase )
snake_case_ :Any = [
k
for k in missing
if k
not in [
"""final_logits_bias""",
"""model.encoder.embed_tokens.weight""",
"""model.decoder.embed_tokens.weight""",
"""lm_head.weight""",
]
]
assert unexpected_missing == [], f"""no matches found for the following torch keys {unexpected_missing}"""
assert extra == [], f"""no matches found for the following tf keys {extra}"""
return torch_model
def A_ ( _lowercase ):
'''simple docstring'''
snake_case_ :List[Any] = tf.train.list_variables(_lowercase )
snake_case_ :Union[str, Any] = {}
snake_case_ :Any = ["""global_step"""]
for name, shape in tqdm(_lowercase, desc="""converting tf checkpoint to dict""" ):
snake_case_ :int = any(pat in name for pat in ignore_name )
if skip_key:
continue
snake_case_ :List[Any] = tf.train.load_variable(_lowercase, _lowercase )
snake_case_ :str = array
return tf_weights
def A_ ( _lowercase, _lowercase, _lowercase ):
'''simple docstring'''
snake_case_ :Any = get_tf_weights_as_numpy(_lowercase )
snake_case_ :Any = convert_bigbird_pegasus(_lowercase, _lowercase )
torch_model.save_pretrained(_lowercase )
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument("--tf_ckpt_path", type=str, help="passed to tf.train.list_variables")
parser.add_argument("--save_dir", default=None, type=str, help="Path to the output PyTorch model.")
__a = parser.parse_args()
__a = {}
convert_bigbird_pegasus_ckpt_to_pytorch(args.tf_ckpt_path, args.save_dir, config_update=config_update)
| 310 | 1 |
'''simple docstring'''
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def a__ ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ ) -> Tuple:
UpperCAmelCase__ : Tuple = 1.5
UpperCAmelCase__ : Union[str, Any] = int(factor * num_class_images )
UpperCAmelCase__ : List[Any] = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=snake_case__ , aesthetic_weight=0.1 )
os.makedirs(F"""{class_data_dir}/images""" , exist_ok=snake_case__ )
if len(list(Path(F"""{class_data_dir}/images""" ).iterdir() ) ) >= num_class_images:
return
while True:
UpperCAmelCase__ : Dict = client.query(text=snake_case__ )
if len(snake_case__ ) >= factor * num_class_images or num_images > 1E4:
break
else:
UpperCAmelCase__ : Any = int(factor * num_images )
UpperCAmelCase__ : str = ClipClient(
url='''https://knn.laion.ai/knn-service''' , indice_name='''laion_400m''' , num_images=snake_case__ , aesthetic_weight=0.1 , )
UpperCAmelCase__ : Optional[int] = 0
UpperCAmelCase__ : Tuple = 0
UpperCAmelCase__ : List[Any] = tqdm(desc='''downloading real regularization images''' , total=snake_case__ )
with open(F"""{class_data_dir}/caption.txt""" , '''w''' ) as fa, open(F"""{class_data_dir}/urls.txt""" , '''w''' ) as fa, open(
F"""{class_data_dir}/images.txt""" , '''w''' ) as fa:
while total < num_class_images:
UpperCAmelCase__ : Any = class_images[count]
count += 1
try:
UpperCAmelCase__ : Dict = requests.get(images['''url'''] )
if img.status_code == 2_00:
UpperCAmelCase__ : Tuple = Image.open(BytesIO(img.content ) )
with open(F"""{class_data_dir}/images/{total}.jpg""" , '''wb''' ) as f:
f.write(img.content )
fa.write(images['''caption'''] + '''\n''' )
fa.write(images['''url'''] + '''\n''' )
fa.write(F"""{class_data_dir}/images/{total}.jpg""" + '''\n''' )
total += 1
pbar.update(1 )
else:
continue
except Exception:
continue
return
def a__ ( ) -> List[str]:
UpperCAmelCase__ : List[str] = argparse.ArgumentParser('''''' , add_help=snake_case__ )
parser.add_argument('''--class_prompt''' , help='''text prompt to retrieve images''' , required=snake_case__ , type=snake_case__ )
parser.add_argument('''--class_data_dir''' , help='''path to save images''' , required=snake_case__ , type=snake_case__ )
parser.add_argument('''--num_class_images''' , help='''number of images to download''' , default=2_00 , type=snake_case__ )
return parser.parse_args()
if __name__ == "__main__":
UpperCamelCase__ = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
| 75 |
'''simple docstring'''
import numpy
# List of input, output pairs
lowercase : Any = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
lowercase : str = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
lowercase : Union[str, Any] = [2, 4, 1, 5]
lowercase : Any = len(train_data)
lowercase : Optional[int] = 0.0_09
def lowerCAmelCase_ ( snake_case__ , snake_case__="train" ):
'''simple docstring'''
return calculate_hypothesis_value(snake_case__ , snake_case__ ) - output(
snake_case__ , snake_case__ )
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : Dict = 0
for i in range(len(snake_case__ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def lowerCAmelCase_ ( snake_case__ , snake_case__ ):
'''simple docstring'''
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def lowerCAmelCase_ ( snake_case__ , snake_case__=m ):
'''simple docstring'''
A : List[Any] = 0
for i in range(snake_case__ ):
if index == -1:
summation_value += _error(snake_case__ )
else:
summation_value += _error(snake_case__ ) * train_data[i][0][index]
return summation_value
def lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
A : List[Any] = summation_of_cost_derivative(snake_case__ , snake_case__ ) / m
return cost_derivative_value
def lowerCAmelCase_ ( ):
'''simple docstring'''
global parameter_vector
# Tune these values to set a tolerance value for predicted output
A : Dict = 0.00_00_02
A : Optional[Any] = 0
A : int = 0
while True:
j += 1
A : List[str] = [0, 0, 0, 0]
for i in range(0 , len(snake_case__ ) ):
A : Union[str, Any] = get_cost_derivative(i - 1 )
A : Tuple = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
snake_case__ , snake_case__ , atol=snake_case__ , rtol=snake_case__ , ):
break
A : List[Any] = temp_parameter_vector
print(('''Number of iterations:''', j) )
def lowerCAmelCase_ ( ):
'''simple docstring'''
for i in range(len(snake_case__ ) ):
print(('''Actual output value:''', output(snake_case__ , '''test''' )) )
print(('''Hypothesis output:''', calculate_hypothesis_value(snake_case__ , '''test''' )) )
if __name__ == "__main__":
run_gradient_descent()
print('\nTesting gradient descent for a linear hypothesis function.\n')
test_gradient_descent()
| 634 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Any = logging.get_logger(__name__)
__UpperCamelCase : List[Any] = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class UpperCAmelCase_ ( lowercase__ ):
snake_case_ = """trocr"""
snake_case_ = ["""past_key_values"""]
snake_case_ = {
"""num_attention_heads""": """decoder_attention_heads""",
"""hidden_size""": """d_model""",
"""num_hidden_layers""": """decoder_layers""",
}
def __init__( self : Dict , _lowercase : Dict=5_0_2_6_5 , _lowercase : Tuple=1_0_2_4 , _lowercase : int=1_2 , _lowercase : Dict=1_6 , _lowercase : Any=4_0_9_6 , _lowercase : Optional[int]="gelu" , _lowercase : int=5_1_2 , _lowercase : List[Any]=0.1 , _lowercase : Dict=0.0 , _lowercase : int=0.0 , _lowercase : Any=2 , _lowercase : Tuple=0.02 , _lowercase : Dict=0.0 , _lowercase : int=True , _lowercase : Tuple=False , _lowercase : Dict=True , _lowercase : Union[str, Any]=True , _lowercase : Optional[int]=1 , _lowercase : Tuple=0 , _lowercase : str=2 , **_lowercase : Tuple , ) -> str:
_lowercase = vocab_size
_lowercase = d_model
_lowercase = decoder_layers
_lowercase = decoder_attention_heads
_lowercase = decoder_ffn_dim
_lowercase = activation_function
_lowercase = max_position_embeddings
_lowercase = dropout
_lowercase = attention_dropout
_lowercase = activation_dropout
_lowercase = init_std
_lowercase = decoder_layerdrop
_lowercase = use_cache
_lowercase = scale_embedding
_lowercase = use_learned_position_embeddings
_lowercase = layernorm_embedding
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , decoder_start_token_id=_lowercase , **_lowercase , ) | 227 | """simple docstring"""
def __UpperCAmelCase ( _snake_case : int, _snake_case : list ):
_enforce_args(_snake_case, _snake_case )
if n == 0:
return 0
_lowercase = float("-inf" )
for i in range(1, n + 1 ):
_lowercase = max(
_snake_case, prices[i - 1] + naive_cut_rod_recursive(n - i, _snake_case ) )
return max_revue
def __UpperCAmelCase ( _snake_case : int, _snake_case : list ):
_enforce_args(_snake_case, _snake_case )
_lowercase = [float("-inf" ) for _ in range(n + 1 )]
return _top_down_cut_rod_recursive(_snake_case, _snake_case, _snake_case )
def __UpperCAmelCase ( _snake_case : int, _snake_case : list, _snake_case : list ):
if max_rev[n] >= 0:
return max_rev[n]
elif n == 0:
return 0
else:
_lowercase = float("-inf" )
for i in range(1, n + 1 ):
_lowercase = max(
_snake_case, prices[i - 1] + _top_down_cut_rod_recursive(n - i, _snake_case, _snake_case ), )
_lowercase = max_revenue
return max_rev[n]
def __UpperCAmelCase ( _snake_case : int, _snake_case : list ):
_enforce_args(_snake_case, _snake_case )
# length(max_rev) = n + 1, to accommodate for the revenue obtainable from a rod of
# length 0.
_lowercase = [float("-inf" ) for _ in range(n + 1 )]
_lowercase = 0
for i in range(1, n + 1 ):
_lowercase = max_rev[i]
for j in range(1, i + 1 ):
_lowercase = max(_snake_case, prices[j - 1] + max_rev[i - j] )
_lowercase = max_revenue_i
return max_rev[n]
def __UpperCAmelCase ( _snake_case : int, _snake_case : list ):
if n < 0:
_lowercase = f"""n must be greater than or equal to 0. Got n = {n}"""
raise ValueError(_snake_case )
if n > len(_snake_case ):
_lowercase = (
"Each integral piece of rod must have a corresponding price. "
f"""Got n = {n} but length of prices = {len(_snake_case )}"""
)
raise ValueError(_snake_case )
def __UpperCAmelCase ( ):
_lowercase = [6, 1_0, 1_2, 1_5, 2_0, 2_3]
_lowercase = len(_snake_case )
# the best revenue comes from cutting the rod into 6 pieces, each
# of length 1 resulting in a revenue of 6 * 6 = 36.
_lowercase = 3_6
_lowercase = top_down_cut_rod(_snake_case, _snake_case )
_lowercase = bottom_up_cut_rod(_snake_case, _snake_case )
_lowercase = naive_cut_rod_recursive(_snake_case, _snake_case )
assert expected_max_revenue == max_rev_top_down
assert max_rev_top_down == max_rev_bottom_up
assert max_rev_bottom_up == max_rev_naive
if __name__ == "__main__":
main() | 227 | 1 |
def _snake_case (__lowercase = 50):
UpperCamelCase_ = [1] * (length + 1)
for row_length in range(length + 1):
for tile_length in range(2 , 5):
for tile_start in range(row_length - tile_length + 1):
ways_number[row_length] += ways_number[
row_length - tile_start - tile_length
]
return ways_number[length]
if __name__ == "__main__":
print(f'{solution() = }')
| 23 |
"""simple docstring"""
import os
import tempfile
import unittest
import numpy as np
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax, slow
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
from diffusers import FlaxDDIMScheduler, FlaxDiffusionPipeline, FlaxStableDiffusionPipeline
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
# pipeline has Flax weights
A = FlaxDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase , cache_dir=_lowercase )
A = [t[-1] for t in os.walk(os.path.join(_lowercase , os.listdir(_lowercase )[0] , 'snapshots' ) )]
A = [item for sublist in all_root_files for item in sublist]
# None of the downloaded files should be a PyTorch file even if we have some here:
# https://huggingface.co/hf-internal-testing/tiny-stable-diffusion-pipe/blob/main/unet/diffusion_pytorch_model.bin
assert not any(f.endswith('.bin' ) for f in files )
@slow
@require_flax
class lowerCamelCase__ ( unittest.TestCase ):
def __a ( self : Optional[Any] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'hf-internal-testing/tiny-stable-diffusion-pipe' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 4
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 64, 64, 3)
if jax.device_count() == 8:
assert np.abs(np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 4.1_5_1_4_7_4_5 ) < 1e-3
assert np.abs(np.abs(_lowercase , dtype=np.floataa ).sum() - 4_9_9_4_7.8_7_5 ) < 5e-1
A = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:] ) ) )
assert len(_lowercase ) == num_samples
def __a ( self : Dict ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='flax' , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_5_6_5_2_4_0_1) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_8_3_8_0_8.2) ) < 5e-1
def __a ( self : List[str] ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : str ):
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa )
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_0_0_3_9_0_6) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_7_3_5_1_6.7_5) ) < 5e-1
def __a ( self : Any ):
A = FlaxDDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , set_alpha_to_one=_lowercase , steps_offset=1 , )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , scheduler=_lowercase , safety_checker=_lowercase , )
A = scheduler.create_state()
A = scheduler_state
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.random.PRNGKey(0 )
A = 50
A = jax.device_count()
A = num_samples * [prompt]
A = pipeline.prepare_inputs(_lowercase )
# shard inputs and rng
A = replicate(_lowercase )
A = jax.random.split(_lowercase , _lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
if jax.device_count() == 8:
assert np.abs((np.abs(images[0, 0, :2, :2, -2:] , dtype=np.floataa ).sum() - 0.0_4_5_0_4_3_9_4_5) ) < 1e-3
assert np.abs((np.abs(_lowercase , dtype=np.floataa ).sum() - 2_3_4_7_6_9_3.5) ) < 5e-1
def __a ( self : List[str] ):
A = (
'A cinematic film still of Morgan Freeman starring as Jimi Hendrix, portrait, 40mm lens, shallow depth of'
' field, close up, split lighting, cinematic'
)
A = jax.device_count()
A = num_samples * [prompt]
A = jax.random.split(jax.random.PRNGKey(0 ) , _lowercase )
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# With memory efficient attention
A , A = FlaxStableDiffusionPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' , revision='bf16' , dtype=jnp.bfloataa , safety_checker=_lowercase , use_memory_efficient_attention=_lowercase , )
A = replicate(_lowercase )
A = pipeline.prepare_inputs(_lowercase )
A = shard(_lowercase )
A = pipeline(_lowercase , _lowercase , _lowercase , jit=_lowercase ).images
assert images_eff.shape == (num_samples, 1, 512, 512, 3)
A = images[2, 0, 256, 10:17, 1]
# I checked the results visually and they are very similar. However, I saw that the max diff is `1` and the `sum`
# over the 8 images is exactly `256`, which is very suspicious. Testing a random slice for now.
assert abs(slice_eff - slice ).max() < 1e-2
| 690 | 0 |
"""simple docstring"""
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
__lowerCAmelCase = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
__a : Any = hf_hub_download(
repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
__a : str = VideoClassificationPipeline(model=_UpperCAmelCase , image_processor=_UpperCAmelCase , top_k=2 )
__a : Optional[int] = [
example_video_filepath,
'''https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4''',
]
return video_classifier, examples
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase ):
for example in examples:
__a : List[Any] = video_classifier(_UpperCAmelCase )
self.assertEqual(
_UpperCAmelCase , [
{'''score''': ANY(_UpperCAmelCase ), '''label''': ANY(_UpperCAmelCase )},
{'''score''': ANY(_UpperCAmelCase ), '''label''': ANY(_UpperCAmelCase )},
] , )
@require_torch
def _lowerCamelCase ( self ):
__a : Tuple = '''hf-internal-testing/tiny-random-VideoMAEForVideoClassification'''
__a : Dict = VideoMAEFeatureExtractor(
size={'''shortest_edge''': 10} , crop_size={'''height''': 10, '''width''': 10} )
__a : Union[str, Any] = pipeline(
'''video-classification''' , model=_UpperCAmelCase , feature_extractor=_UpperCAmelCase , frame_sampling_rate=4 )
__a : str = hf_hub_download(repo_id='''nateraw/video-demo''' , filename='''archery.mp4''' , repo_type='''dataset''' )
__a : Dict = video_classifier(_UpperCAmelCase , top_k=2 )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [{'''score''': 0.5_1_9_9, '''label''': '''LABEL_0'''}, {'''score''': 0.4_8_0_1, '''label''': '''LABEL_1'''}] , )
__a : str = video_classifier(
[
video_file_path,
video_file_path,
] , top_k=2 , )
self.assertEqual(
nested_simplify(_UpperCAmelCase , decimals=4 ) , [
[{'''score''': 0.5_1_9_9, '''label''': '''LABEL_0'''}, {'''score''': 0.4_8_0_1, '''label''': '''LABEL_1'''}],
[{'''score''': 0.5_1_9_9, '''label''': '''LABEL_0'''}, {'''score''': 0.4_8_0_1, '''label''': '''LABEL_1'''}],
] , )
@require_tf
def _lowerCamelCase ( self ):
pass | 101 |
"""simple docstring"""
import copy
import re
class __lowercase :
'''simple docstring'''
__lowerCAmelCase = '''hp'''
__lowerCAmelCase = {}
__lowerCAmelCase = None
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase , _UpperCAmelCase ):
__a : Optional[int] = prefix
__a : List[str] = defaults
cls.build_naming_info()
@staticmethod
def _lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase ):
if len(_UpperCAmelCase ) == 0:
return ""
__a : Optional[int] = None
if any(char.isdigit() for char in word ):
raise Exception(f"""Parameters should not contain numbers: '{word}' contains a number""" )
if word in info["short_word"]:
return info["short_word"][word]
for prefix_len in range(1 , len(_UpperCAmelCase ) + 1 ):
__a : str = word[:prefix_len]
if prefix in info["reverse_short_word"]:
continue
else:
__a : Tuple = prefix
break
if short_word is None:
# Paranoid fallback
def int_to_alphabetic(_UpperCAmelCase ):
__a : List[str] = ''''''
while integer != 0:
__a : Union[str, Any] = chr(ord('''A''' ) + integer % 10 ) + s
integer //= 10
return s
__a : Optional[int] = 0
while True:
__a : List[str] = word + '''#''' + int_to_alphabetic(_UpperCAmelCase )
if sword in info["reverse_short_word"]:
continue
else:
__a : Optional[Any] = sword
break
__a : List[str] = short_word
__a : Dict = word
return short_word
@staticmethod
def _lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase ):
__a : str = param_name.split('''_''' )
__a : str = [TrialShortNamer.shortname_for_word(_UpperCAmelCase , _UpperCAmelCase ) for word in words]
# We try to create a separatorless short name, but if there is a collision we have to fallback
# to a separated short name
__a : Dict = ['''''', '''_''']
for separator in separators:
__a : Union[str, Any] = separator.join(_UpperCAmelCase )
if shortname not in info["reverse_short_param"]:
__a : List[str] = shortname
__a : Union[str, Any] = param_name
return shortname
return param_name
@staticmethod
def _lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase ):
__a : int = TrialShortNamer.shortname_for_key(_UpperCAmelCase , _UpperCAmelCase )
__a : Optional[Any] = short_name
__a : Optional[Any] = param_name
@classmethod
def _lowerCamelCase ( cls ):
if cls.NAMING_INFO is not None:
return
__a : Optional[Any] = {
'''short_word''': {},
'''reverse_short_word''': {},
'''short_param''': {},
'''reverse_short_param''': {},
}
__a : List[str] = list(cls.DEFAULTS.keys() )
for k in field_keys:
cls.add_new_param_name(_UpperCAmelCase , _UpperCAmelCase )
__a : Optional[Any] = info
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase ):
cls.build_naming_info()
assert cls.PREFIX is not None
__a : str = [copy.copy(cls.PREFIX )]
for k, v in params.items():
if k not in cls.DEFAULTS:
raise Exception(f"""You should provide a default value for the param name {k} with value {v}""" )
if v == cls.DEFAULTS[k]:
# The default value is not added to the name
continue
__a : Union[str, Any] = cls.NAMING_INFO['''short_param'''][k]
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
__a : str = 1 if v else 0
__a : Tuple = '''''' if isinstance(_UpperCAmelCase , (int, float) ) else '''-'''
__a : Dict = f"""{key}{sep}{v}"""
name.append(_UpperCAmelCase )
return "_".join(_UpperCAmelCase )
@classmethod
def _lowerCamelCase ( cls , _UpperCAmelCase ):
__a : List[Any] = repr[len(cls.PREFIX ) + 1 :]
if repr == "":
__a : Tuple = []
else:
__a : str = repr.split('''_''' )
__a : Optional[Any] = {}
for value in values:
if "-" in value:
__a , __a : List[Any] = value.split('''-''' )
else:
__a : int = re.sub('''[0-9.]''' , '''''' , _UpperCAmelCase )
__a : Union[str, Any] = float(re.sub('''[^0-9.]''' , '''''' , _UpperCAmelCase ) )
__a : Dict = cls.NAMING_INFO['''reverse_short_param'''][p_k]
__a : Union[str, Any] = p_v
for k in cls.DEFAULTS:
if k not in parameters:
__a : Optional[int] = cls.DEFAULTS[k]
return parameters | 101 | 1 |
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class UpperCAmelCase__ ( snake_case__ , unittest.TestCase ):
snake_case_ = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def snake_case_ ( self , A__=0 ):
"""simple docstring"""
UpperCAmelCase_: str = floats_tensor((1, 3, 128, 128) , rng=random.Random(A__ ) )
UpperCAmelCase_: str = np.random.RandomState(A__ )
UpperCAmelCase_: List[str] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"strength": 0.75,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: Tuple = self.get_dummy_inputs()
UpperCAmelCase_: Optional[int] = pipe(**A__ ).images
UpperCAmelCase_: Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_: List[Any] = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
assert np.abs(image_slice - expected_slice ).max() < 1E-1
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_: Optional[int] = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=A__ )
pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: Union[str, Any] = self.get_dummy_inputs()
UpperCAmelCase_: Dict = pipe(**A__ ).images
UpperCAmelCase_: List[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_: List[str] = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Optional[Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_: Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A__ )
# warmup pass to apply optimizations
UpperCAmelCase_: Tuple = pipe(**self.get_dummy_inputs() )
UpperCAmelCase_: Optional[int] = self.get_dummy_inputs()
UpperCAmelCase_: Tuple = pipe(**A__ ).images
UpperCAmelCase_: Any = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_: Optional[int] = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_: Tuple = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: List[str] = self.get_dummy_inputs()
UpperCAmelCase_: Optional[int] = pipe(**A__ ).images
UpperCAmelCase_: str = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_: Dict = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Union[str, Any] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_: Optional[Any] = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: List[str] = self.get_dummy_inputs()
UpperCAmelCase_: Optional[Any] = pipe(**A__ ).images
UpperCAmelCase_: int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_: Optional[int] = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: str = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint , provider="CPUExecutionProvider" )
UpperCAmelCase_: Dict = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: int = self.get_dummy_inputs()
UpperCAmelCase_: Optional[int] = pipe(**A__ ).images
UpperCAmelCase_: int = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
UpperCAmelCase_: Optional[int] = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-1
@nightly
@require_onnxruntime
@require_torch_gpu
class UpperCAmelCase__ ( unittest.TestCase ):
@property
def snake_case_ ( self ):
"""simple docstring"""
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Dict = ort.SessionOptions()
UpperCAmelCase_: Dict = False
return options
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: List[str] = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCAmelCase_: str = init_image.resize((768, 512) )
# using the PNDM scheduler by default
UpperCAmelCase_: Dict = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"CompVis/stable-diffusion-v1-4" , revision="onnx" , safety_checker=A__ , feature_extractor=A__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: Tuple = "A fantasy landscape, trending on artstation"
UpperCAmelCase_: Union[str, Any] = np.random.RandomState(0 )
UpperCAmelCase_: int = pipe(
prompt=A__ , image=A__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=A__ , output_type="np" , )
UpperCAmelCase_: str = output.images
UpperCAmelCase_: Dict = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCAmelCase_: str = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def snake_case_ ( self ):
"""simple docstring"""
UpperCAmelCase_: Any = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/img2img/sketch-mountains-input.jpg" )
UpperCAmelCase_: str = init_image.resize((768, 512) )
UpperCAmelCase_: str = LMSDiscreteScheduler.from_pretrained(
"runwayml/stable-diffusion-v1-5" , subfolder="scheduler" , revision="onnx" )
UpperCAmelCase_: Optional[int] = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
"runwayml/stable-diffusion-v1-5" , revision="onnx" , scheduler=A__ , safety_checker=A__ , feature_extractor=A__ , provider=self.gpu_provider , sess_options=self.gpu_options , )
pipe.set_progress_bar_config(disable=A__ )
UpperCAmelCase_: Union[str, Any] = "A fantasy landscape, trending on artstation"
UpperCAmelCase_: List[str] = np.random.RandomState(0 )
UpperCAmelCase_: Optional[int] = pipe(
prompt=A__ , image=A__ , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=A__ , output_type="np" , )
UpperCAmelCase_: Optional[Any] = output.images
UpperCAmelCase_: Dict = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
UpperCAmelCase_: Tuple = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2 | 137 |
def lowercase ( ) -> int:
return [
a * b * (1000 - a - b)
for a in range(1 ,999 )
for b in range(_a ,999 )
if (a * a + b * b == (1000 - a - b) ** 2)
][0]
if __name__ == "__main__":
print(F"""{solution() = }""") | 137 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class a_ ( unittest.TestCase ):
def __init__( self : List[str] , a_ : int , a_ : List[str]=1_3 , a_ : List[str]=7 , a_ : Optional[Any]=True , a_ : Union[str, Any]=True , a_ : Dict=True , a_ : Any=True , a_ : Tuple=9_9 , a_ : Dict=3_2 , a_ : Tuple=5 , a_ : Dict=4 , a_ : Tuple=3_7 , a_ : Optional[int]="gelu" , a_ : Dict=0.1 , a_ : Union[str, Any]=0.1 , a_ : Union[str, Any]=5_1_2 , a_ : Optional[Any]=1_6 , a_ : str=2 , a_ : int=0.0_2 , a_ : Optional[Any]=4 , ) -> List[Any]:
snake_case: List[str] =parent
snake_case: List[str] =batch_size
snake_case: Tuple =seq_length
snake_case: Optional[Any] =is_training
snake_case: Dict =use_attention_mask
snake_case: str =use_token_type_ids
snake_case: str =use_labels
snake_case: Dict =vocab_size
snake_case: str =hidden_size
snake_case: Dict =num_hidden_layers
snake_case: Union[str, Any] =num_attention_heads
snake_case: Optional[int] =intermediate_size
snake_case: List[Any] =hidden_act
snake_case: Union[str, Any] =hidden_dropout_prob
snake_case: List[Any] =attention_probs_dropout_prob
snake_case: List[Any] =max_position_embeddings
snake_case: List[Any] =type_vocab_size
snake_case: Tuple =type_sequence_label_size
snake_case: List[Any] =initializer_range
snake_case: Any =num_choices
def UpperCamelCase ( self : List[Any] ) -> Dict:
snake_case: int =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case: Dict =None
if self.use_attention_mask:
snake_case: Union[str, Any] =random_attention_mask([self.batch_size, self.seq_length] )
snake_case: Dict =DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_lowerCAmelCase , )
return config, input_ids, attention_mask
def UpperCamelCase ( self : List[str] ) -> str:
snake_case: List[Any] =self.prepare_config_and_inputs()
snake_case , snake_case , snake_case: Optional[Any] =config_and_inputs
snake_case: List[Any] ={'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class a_ ( _lowerCAmelCase , unittest.TestCase ):
UpperCAmelCase : int = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCamelCase ( self : Union[str, Any] ) -> int:
snake_case: Union[str, Any] =FlaxDistilBertModelTester(self )
@slow
def UpperCamelCase ( self : Any ) -> Optional[Any]:
for model_class_name in self.all_model_classes:
snake_case: int =model_class_name.from_pretrained('distilbert-base-uncased' )
snake_case: int =model(np.ones((1, 1) ) )
self.assertIsNotNone(_lowerCAmelCase )
@require_flax
class a_ ( unittest.TestCase ):
@slow
def UpperCamelCase ( self : List[Any] ) -> Tuple:
snake_case: List[str] =FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
snake_case: int =np.array([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
snake_case: Dict =np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
snake_case: Optional[int] =model(_lowerCAmelCase , attention_mask=_lowerCAmelCase )[0]
snake_case: Optional[Any] =(1, 1_1, 7_6_8)
self.assertEqual(output.shape , _lowerCAmelCase )
snake_case: str =np.array([[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _lowerCAmelCase , atol=1E-4 ) )
| 709 |
'''simple docstring'''
def a_ ( __UpperCAmelCase ) -> int:
"""simple docstring"""
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
snake_case: Any =f'''Input value of [number={number}] must be an integer'''
raise TypeError(__UpperCAmelCase )
if number < 1:
snake_case: Tuple =f'''Input value of [number={number}] must be > 0'''
raise ValueError(__UpperCAmelCase )
snake_case: int =1
for i in range(1 , __UpperCAmelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 347 | 0 |
def SCREAMING_SNAKE_CASE__ ( UpperCamelCase__: List[Any] ):
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = set({"""(""", """[""", """{"""} )
SCREAMING_SNAKE_CASE__ = set({""")""", """]""", """}"""} )
SCREAMING_SNAKE_CASE__ = {"""{""": """}""", """[""": """]""", """(""": """)"""}
for i in range(len(UpperCamelCase__ ) ):
if s[i] in open_brackets:
stack.append(s[i] )
elif s[i] in closed_brackets and (
len(UpperCamelCase__ ) == 0 or (len(UpperCamelCase__ ) > 0 and open_to_closed[stack.pop()] != s[i])
):
return False
return len(UpperCamelCase__ ) == 0
def SCREAMING_SNAKE_CASE__ ( ):
SCREAMING_SNAKE_CASE__ = input("""Enter sequence of brackets: """ )
if is_balanced(UpperCamelCase__ ):
print(UpperCamelCase__ , """is balanced""" )
else:
print(UpperCamelCase__ , """is not balanced""" )
if __name__ == "__main__":
main() | 6 |
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class UpperCamelCase_ ( unittest.TestCase ):
def _snake_case ( self :Union[str, Any] ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self :Any ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_euler""" )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _snake_case ( self :str ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_euler""" )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def _snake_case ( self :Tuple ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("""sample_dpmpp_2m""" )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe(
[prompt] , generator=__A , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=__A , )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
SCREAMING_SNAKE_CASE__ = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2 | 6 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import DistilBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers.models.distilbert.modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertModel,
)
class lowerCAmelCase :
def __init__( self , snake_case__ , ):
lowerCAmelCase : int = parent
lowerCAmelCase : List[Any] = 13
lowerCAmelCase : str = 7
lowerCAmelCase : Any = True
lowerCAmelCase : str = True
lowerCAmelCase : Dict = False
lowerCAmelCase : List[str] = True
lowerCAmelCase : Tuple = 99
lowerCAmelCase : Optional[int] = 32
lowerCAmelCase : Tuple = 2
lowerCAmelCase : Optional[int] = 4
lowerCAmelCase : str = 37
lowerCAmelCase : str = 'gelu'
lowerCAmelCase : List[str] = 0.1
lowerCAmelCase : Union[str, Any] = 0.1
lowerCAmelCase : Optional[int] = 512
lowerCAmelCase : List[str] = 16
lowerCAmelCase : int = 2
lowerCAmelCase : Optional[int] = 0.0_2
lowerCAmelCase : Tuple = 3
lowerCAmelCase : Any = 4
lowerCAmelCase : int = None
def lowercase ( self ):
lowerCAmelCase : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : List[str] = None
if self.use_input_mask:
lowerCAmelCase : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Tuple = None
lowerCAmelCase : Tuple = None
lowerCAmelCase : Tuple = None
if self.use_labels:
lowerCAmelCase : List[str] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase : int = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Dict = TFDistilBertModel(config=__UpperCamelCase )
lowerCAmelCase : Optional[Any] = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase : List[str] = model(__UpperCamelCase )
lowerCAmelCase : List[str] = [input_ids, input_mask]
lowerCAmelCase : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Optional[int] = TFDistilBertForMaskedLM(config=__UpperCamelCase )
lowerCAmelCase : Any = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = TFDistilBertForQuestionAnswering(config=__UpperCamelCase )
lowerCAmelCase : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
}
lowerCAmelCase : int = model(__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Dict = self.num_labels
lowerCAmelCase : int = TFDistilBertForSequenceClassification(__UpperCamelCase )
lowerCAmelCase : Tuple = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase : int = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Union[str, Any] = self.num_choices
lowerCAmelCase : List[Any] = TFDistilBertForMultipleChoice(__UpperCamelCase )
lowerCAmelCase : Union[str, Any] = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Optional[int] = tf.tile(tf.expand_dims(__UpperCamelCase , 1 ) , (1, self.num_choices, 1) )
lowerCAmelCase : Optional[int] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
}
lowerCAmelCase : Any = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
lowerCAmelCase : Dict = self.num_labels
lowerCAmelCase : List[Any] = TFDistilBertForTokenClassification(__UpperCamelCase )
lowerCAmelCase : int = {'input_ids': input_ids, 'attention_mask': input_mask}
lowerCAmelCase : Tuple = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self ):
lowerCAmelCase : Any = self.prepare_config_and_inputs()
((lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase) , (lowerCAmelCase)) : Tuple = config_and_inputs
lowerCAmelCase : int = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase ( a , a , unittest.TestCase ):
_lowerCamelCase : List[Any] = (
(
TFDistilBertModel,
TFDistilBertForMaskedLM,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertForMultipleChoice,
)
if is_tf_available()
else None
)
_lowerCamelCase : List[Any] = (
{
"""feature-extraction""": TFDistilBertModel,
"""fill-mask""": TFDistilBertForMaskedLM,
"""question-answering""": TFDistilBertForQuestionAnswering,
"""text-classification""": TFDistilBertForSequenceClassification,
"""token-classification""": TFDistilBertForTokenClassification,
"""zero-shot""": TFDistilBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase : List[Any] = False
_lowerCamelCase : List[Any] = False
def lowercase ( self ):
lowerCAmelCase : Optional[Any] = TFDistilBertModelTester(self )
lowerCAmelCase : str = ConfigTester(self , config_class=__UpperCamelCase , dim=37 )
def lowercase ( self ):
self.config_tester.run_common_tests()
def lowercase ( self ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__UpperCamelCase )
def lowercase ( self ):
lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__UpperCamelCase )
def lowercase ( self ):
lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__UpperCamelCase )
def lowercase ( self ):
lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__UpperCamelCase )
def lowercase ( self ):
lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__UpperCamelCase )
def lowercase ( self ):
lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__UpperCamelCase )
@slow
def lowercase ( self ):
for model_name in list(TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1] ):
lowerCAmelCase : Dict = TFDistilBertModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_tf
class lowerCAmelCase ( unittest.TestCase ):
@slow
def lowercase ( self ):
lowerCAmelCase : List[str] = TFDistilBertModel.from_pretrained('distilbert-base-uncased' )
lowerCAmelCase : List[Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
lowerCAmelCase : List[Any] = model(__UpperCamelCase )[0]
lowerCAmelCase : Union[str, Any] = [1, 6, 768]
self.assertEqual(output.shape , __UpperCamelCase )
lowerCAmelCase : List[Any] = tf.constant(
[
[
[0.1_9_2_6_1_8_8_5, -0.1_3_7_3_2_9_5_5, 0.4_1_1_9_7_9_9],
[0.2_2_1_5_0_1_5_6, -0.0_7_4_2_2_6_6_1, 0.3_9_0_3_7_2_0_4],
[0.2_2_7_5_6_0_1_8, -0.0_8_9_6_4_1_4, 0.3_7_0_1_4_6_7],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 )
| 707 |
'''simple docstring'''
def __UpperCamelCase ( _A : List[str] ) -> Optional[Any]:
"""simple docstring"""
if not head:
return True
# split the list to two parts
lowerCAmelCase , lowerCAmelCase : str = head.next, head
while fast and fast.next:
lowerCAmelCase : Optional[int] = fast.next.next
lowerCAmelCase : int = slow.next
lowerCAmelCase : int = slow.next
lowerCAmelCase : Optional[Any] = None # Don't forget here! But forget still works!
# reverse the second part
lowerCAmelCase : List[Any] = None
while second:
lowerCAmelCase : List[Any] = second.next
lowerCAmelCase : Union[str, Any] = node
lowerCAmelCase : Optional[Any] = second
lowerCAmelCase : Any = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
lowerCAmelCase : Optional[Any] = node.next
lowerCAmelCase : Tuple = head.next
return True
def __UpperCamelCase ( _A : Optional[Any] ) -> Optional[int]:
"""simple docstring"""
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
lowerCAmelCase : Optional[int] = head
while fast and fast.next:
lowerCAmelCase , lowerCAmelCase : Optional[Any] = fast.next.next, slow.next
# 2. Push the second half into the stack
lowerCAmelCase : Tuple = [slow.val]
while slow.next:
lowerCAmelCase : Tuple = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
lowerCAmelCase : Union[str, Any] = cur.next
return True
def __UpperCamelCase ( _A : Tuple ) -> Optional[int]:
"""simple docstring"""
if not head or not head.next:
return True
lowerCAmelCase : Optional[int] = {}
lowerCAmelCase : int = 0
while head:
if head.val in d:
d[head.val].append(_A )
else:
lowerCAmelCase : Any = [pos]
lowerCAmelCase : int = head.next
pos += 1
lowerCAmelCase : str = pos - 1
lowerCAmelCase : Optional[Any] = 0
for v in d.values():
if len(_A ) % 2 != 0:
middle += 1
else:
lowerCAmelCase : Any = 0
for i in range(0 , len(_A ) ):
if v[i] + v[len(_A ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True
| 646 | 0 |
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class UpperCAmelCase__ ( snake_case ):
"""simple docstring"""
lowerCAmelCase__ : UNetaDModel
lowerCAmelCase__ : ScoreSdeVeScheduler
def __init__( self: Optional[Any] , __lowerCAmelCase: UNetaDModel , __lowerCAmelCase: ScoreSdeVeScheduler ) -> Union[str, Any]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__lowerCAmelCase , scheduler=__lowerCAmelCase )
@torch.no_grad()
def __call__( self: List[Any] , __lowerCAmelCase: int = 1 , __lowerCAmelCase: int = 2_000 , __lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None , __lowerCAmelCase: Optional[str] = "pil" , __lowerCAmelCase: bool = True , **__lowerCAmelCase: Tuple , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
__UpperCAmelCase = self.unet.config.sample_size
__UpperCAmelCase = (batch_size, 3, img_size, img_size)
__UpperCAmelCase = self.unet
__UpperCAmelCase = randn_tensor(__lowerCAmelCase , generator=__lowerCAmelCase ) * self.scheduler.init_noise_sigma
__UpperCAmelCase = sample.to(self.device )
self.scheduler.set_timesteps(__lowerCAmelCase )
self.scheduler.set_sigmas(__lowerCAmelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
__UpperCAmelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
__UpperCAmelCase = self.unet(__lowerCAmelCase , __lowerCAmelCase ).sample
__UpperCAmelCase = self.scheduler.step_correct(__lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase ).prev_sample
# prediction step
__UpperCAmelCase = model(__lowerCAmelCase , __lowerCAmelCase ).sample
__UpperCAmelCase = self.scheduler.step_pred(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , generator=__lowerCAmelCase )
__UpperCAmelCase , __UpperCAmelCase = output.prev_sample, output.prev_sample_mean
__UpperCAmelCase = sample_mean.clamp(0 , 1 )
__UpperCAmelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
__UpperCAmelCase = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__lowerCAmelCase )
| 221 | import math
import os
import unittest
from transformers import MegatronBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
)
class UpperCAmelCase__ :
"""simple docstring"""
def __init__( self: int , __lowerCAmelCase: Optional[int] , __lowerCAmelCase: int=13 , __lowerCAmelCase: Any=7 , __lowerCAmelCase: List[Any]=True , __lowerCAmelCase: Dict=True , __lowerCAmelCase: Union[str, Any]=True , __lowerCAmelCase: List[Any]=True , __lowerCAmelCase: int=99 , __lowerCAmelCase: Dict=64 , __lowerCAmelCase: Optional[Any]=32 , __lowerCAmelCase: Tuple=5 , __lowerCAmelCase: List[str]=4 , __lowerCAmelCase: Tuple=37 , __lowerCAmelCase: Any="gelu" , __lowerCAmelCase: Union[str, Any]=0.1 , __lowerCAmelCase: List[Any]=0.1 , __lowerCAmelCase: int=512 , __lowerCAmelCase: Union[str, Any]=16 , __lowerCAmelCase: Dict=2 , __lowerCAmelCase: Tuple=0.02 , __lowerCAmelCase: Dict=3 , __lowerCAmelCase: Optional[int]=4 , __lowerCAmelCase: Union[str, Any]=None , ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = parent
__UpperCAmelCase = batch_size
__UpperCAmelCase = seq_length
__UpperCAmelCase = is_training
__UpperCAmelCase = use_input_mask
__UpperCAmelCase = use_token_type_ids
__UpperCAmelCase = use_labels
__UpperCAmelCase = vocab_size
__UpperCAmelCase = hidden_size
__UpperCAmelCase = embedding_size
__UpperCAmelCase = num_hidden_layers
__UpperCAmelCase = num_attention_heads
__UpperCAmelCase = intermediate_size
__UpperCAmelCase = hidden_act
__UpperCAmelCase = hidden_dropout_prob
__UpperCAmelCase = attention_probs_dropout_prob
__UpperCAmelCase = max_position_embeddings
__UpperCAmelCase = type_vocab_size
__UpperCAmelCase = type_sequence_label_size
__UpperCAmelCase = initializer_range
__UpperCAmelCase = num_labels
__UpperCAmelCase = num_choices
__UpperCAmelCase = scope
def _UpperCAmelCase ( self: Dict ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__UpperCAmelCase = None
if self.use_input_mask:
__UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
__UpperCAmelCase = None
if self.use_token_type_ids:
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__UpperCAmelCase = None
__UpperCAmelCase = None
__UpperCAmelCase = None
if self.use_labels:
__UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
__UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCAmelCase ( self: Tuple ) -> Optional[int]:
'''simple docstring'''
return MegatronBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__lowerCAmelCase , initializer_range=self.initializer_range , )
def _UpperCAmelCase ( self: Optional[int] , __lowerCAmelCase: Any , __lowerCAmelCase: Dict , __lowerCAmelCase: Union[str, Any] , __lowerCAmelCase: List[Any] , __lowerCAmelCase: Union[str, Any] , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: int ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = MegatronBertModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
__UpperCAmelCase = model(__lowerCAmelCase , token_type_ids=__lowerCAmelCase )
__UpperCAmelCase = model(__lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _UpperCAmelCase ( self: Optional[int] , __lowerCAmelCase: int , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: List[str] , __lowerCAmelCase: str , __lowerCAmelCase: Optional[int] , __lowerCAmelCase: Dict , __lowerCAmelCase: Optional[int] ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = MegatronBertForMaskedLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self: Union[str, Any] , __lowerCAmelCase: Union[str, Any] , __lowerCAmelCase: Union[str, Any] , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: str , __lowerCAmelCase: Dict , __lowerCAmelCase: Tuple , __lowerCAmelCase: Tuple ) -> Any:
'''simple docstring'''
__UpperCAmelCase = MegatronBertForCausalLM(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCAmelCase ( self: List[str] , __lowerCAmelCase: Any , __lowerCAmelCase: Tuple , __lowerCAmelCase: Any , __lowerCAmelCase: Any , __lowerCAmelCase: Dict , __lowerCAmelCase: Dict , __lowerCAmelCase: Optional[int] ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = MegatronBertForNextSentencePrediction(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _UpperCAmelCase ( self: Optional[Any] , __lowerCAmelCase: str , __lowerCAmelCase: List[Any] , __lowerCAmelCase: Tuple , __lowerCAmelCase: Optional[int] , __lowerCAmelCase: int , __lowerCAmelCase: Optional[int] , __lowerCAmelCase: Dict ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = MegatronBertForPreTraining(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , next_sentence_label=__lowerCAmelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _UpperCAmelCase ( self: List[str] , __lowerCAmelCase: List[Any] , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: List[str] , __lowerCAmelCase: Union[str, Any] , __lowerCAmelCase: int , __lowerCAmelCase: Union[str, Any] , __lowerCAmelCase: Tuple ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = MegatronBertForQuestionAnswering(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , start_positions=__lowerCAmelCase , end_positions=__lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCAmelCase ( self: Any , __lowerCAmelCase: int , __lowerCAmelCase: Tuple , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: List[Any] , __lowerCAmelCase: int , __lowerCAmelCase: Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = MegatronBertForSequenceClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _UpperCAmelCase ( self: str , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: Tuple , __lowerCAmelCase: str , __lowerCAmelCase: List[Any] , __lowerCAmelCase: Dict , __lowerCAmelCase: Tuple , __lowerCAmelCase: List[str] ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.num_labels
__UpperCAmelCase = MegatronBertForTokenClassification(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = model(__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCAmelCase ( self: Any , __lowerCAmelCase: Dict , __lowerCAmelCase: Tuple , __lowerCAmelCase: Tuple , __lowerCAmelCase: Optional[Any] , __lowerCAmelCase: Union[str, Any] , __lowerCAmelCase: str , __lowerCAmelCase: int ) -> Any:
'''simple docstring'''
__UpperCAmelCase = self.num_choices
__UpperCAmelCase = MegatronBertForMultipleChoice(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
__UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__UpperCAmelCase = model(
__lowerCAmelCase , attention_mask=__lowerCAmelCase , token_type_ids=__lowerCAmelCase , labels=__lowerCAmelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _UpperCAmelCase ( self: Dict ) -> List[str]:
'''simple docstring'''
__UpperCAmelCase = self.prepare_config_and_inputs()
(
(
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) , (
__UpperCAmelCase
) ,
) = config_and_inputs
__UpperCAmelCase = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( snake_case , snake_case , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ : str = (
(
MegatronBertModel,
MegatronBertForMaskedLM,
MegatronBertForCausalLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase__ : Optional[Any] = (
{
'feature-extraction': MegatronBertModel,
'fill-mask': MegatronBertForMaskedLM,
'question-answering': MegatronBertForQuestionAnswering,
'text-classification': MegatronBertForSequenceClassification,
'text-generation': MegatronBertForCausalLM,
'token-classification': MegatronBertForTokenClassification,
'zero-shot': MegatronBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase__ : Optional[int] = True
# test_resize_embeddings = False
lowerCAmelCase__ : Any = False
def _UpperCAmelCase ( self: int , __lowerCAmelCase: Any , __lowerCAmelCase: str , __lowerCAmelCase: int=False ) -> str:
'''simple docstring'''
__UpperCAmelCase = super()._prepare_for_class(__lowerCAmelCase , __lowerCAmelCase , return_labels=__lowerCAmelCase )
if return_labels:
if model_class in get_values(__lowerCAmelCase ):
__UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__lowerCAmelCase )
__UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__lowerCAmelCase )
return inputs_dict
def _UpperCAmelCase ( self: int ) -> Any:
'''simple docstring'''
__UpperCAmelCase = MegatronBertModelTester(self )
__UpperCAmelCase = ConfigTester(self , config_class=__lowerCAmelCase , hidden_size=37 )
def _UpperCAmelCase ( self: List[Any] ) -> List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _UpperCAmelCase ( self: List[Any] ) -> Optional[int]:
'''simple docstring'''
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_model(*__lowerCAmelCase )
def _UpperCAmelCase ( self: Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_masked_lm(*__lowerCAmelCase )
def _UpperCAmelCase ( self: str ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_multiple_choice(*__lowerCAmelCase )
def _UpperCAmelCase ( self: Tuple ) -> List[Any]:
'''simple docstring'''
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_next_sequence_prediction(*__lowerCAmelCase )
def _UpperCAmelCase ( self: List[str] ) -> Dict:
'''simple docstring'''
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_pretraining(*__lowerCAmelCase )
def _UpperCAmelCase ( self: Union[str, Any] ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_question_answering(*__lowerCAmelCase )
def _UpperCAmelCase ( self: Optional[int] ) -> Any:
'''simple docstring'''
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_sequence_classification(*__lowerCAmelCase )
def _UpperCAmelCase ( self: int ) -> Tuple:
'''simple docstring'''
__UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_megatron_bert_for_token_classification(*__lowerCAmelCase )
def __lowerCAmelCase ( A_ : List[str] ) -> List[Any]:
return torch.tensor(
A_ , dtype=torch.long , device=A_ , )
a_ = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip("Model is not available." )
def _UpperCAmelCase ( self: List[Any] ) -> int:
'''simple docstring'''
__UpperCAmelCase = "nvidia/megatron-bert-uncased-345m"
if "MYDIR" in os.environ:
__UpperCAmelCase = os.path.join(os.environ["MYDIR"] , __lowerCAmelCase )
__UpperCAmelCase = MegatronBertModel.from_pretrained(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.half()
__UpperCAmelCase = _long_tensor([[101, 7_110, 1_005, 1_056, 2_023, 11_333, 17_413, 1_029, 102]] )
with torch.no_grad():
__UpperCAmelCase = model(__lowerCAmelCase )[0]
__UpperCAmelCase = torch.Size((1, 9, 1_024) )
self.assertEqual(output.shape , __lowerCAmelCase )
__UpperCAmelCase = [-0.6040, -0.2517, -0.1025, 0.3420, -0.6758, -0.0017, -0.1089, -0.1990, 0.5728]
for ii in range(3 ):
for jj in range(3 ):
__UpperCAmelCase = output[0, ii, jj]
__UpperCAmelCase = expected[3 * ii + jj]
__UpperCAmelCase = "ii={} jj={} a={} b={}".format(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
self.assertTrue(math.isclose(__lowerCAmelCase , __lowerCAmelCase , rel_tol=__lowerCAmelCase , abs_tol=__lowerCAmelCase ) , msg=__lowerCAmelCase )
| 221 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_pegasus import PegasusTokenizer
else:
_lowerCamelCase : Optional[int] = None
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : List[Any] = "▁"
_lowerCamelCase : str = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
_lowerCamelCase : List[Any] = {
"vocab_file": {"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/spiece.model"},
"tokenizer_file": {
"google/pegasus-xsum": "https://huggingface.co/google/pegasus-xsum/resolve/main/tokenizer.json"
},
}
_lowerCamelCase : Dict = {
"google/pegasus-xsum": 512,
}
class SCREAMING_SNAKE_CASE ( _a ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = VOCAB_FILES_NAMES
_SCREAMING_SNAKE_CASE = PRETRAINED_VOCAB_FILES_MAP
_SCREAMING_SNAKE_CASE = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_SCREAMING_SNAKE_CASE = PegasusTokenizer
_SCREAMING_SNAKE_CASE = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any] , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Dict="<pad>" , UpperCamelCase__ : int="</s>" , UpperCamelCase__ : int="<unk>" , UpperCamelCase__ : Union[str, Any]="<mask_2>" , UpperCamelCase__ : List[str]="<mask_1>" , UpperCamelCase__ : List[str]=None , UpperCamelCase__ : Dict=1_0_3 , **UpperCamelCase__ : Dict , ):
"""simple docstring"""
UpperCamelCase = offset
if additional_special_tokens is not None:
if not isinstance(UpperCamelCase__ , UpperCamelCase__ ):
raise TypeError(
f"""additional_special_tokens should be of type {type(UpperCamelCase__ )}, but is"""
f""" {type(UpperCamelCase__ )}""" )
UpperCamelCase = (
([mask_token_sent] + additional_special_tokens)
if mask_token_sent not in additional_special_tokens and mask_token_sent is not None
else additional_special_tokens
)
# fill additional tokens with ..., <unk_token_102> in case not all additional tokens are already taken
additional_special_tokens_extended += [
f"""<unk_{i}>""" for i in range(len(UpperCamelCase__ ) , self.offset - 1 )
]
if len(set(UpperCamelCase__ ) ) != len(UpperCamelCase__ ):
raise ValueError(
'Please make sure that the provided additional_special_tokens do not contain an incorrectly'
f""" shifted list of <unk_x> tokens. Found {additional_special_tokens_extended}.""" )
UpperCamelCase = additional_special_tokens_extended
else:
UpperCamelCase = [mask_token_sent] if mask_token_sent is not None else []
additional_special_tokens += [f"""<unk_{i}>""" for i in range(2 , self.offset )]
super().__init__(
UpperCamelCase__ , tokenizer_file=UpperCamelCase__ , pad_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , mask_token_sent=UpperCamelCase__ , offset=UpperCamelCase__ , additional_special_tokens=UpperCamelCase__ , **UpperCamelCase__ , )
UpperCamelCase = vocab_file
UpperCamelCase = False if not self.vocab_file else True
def A ( self : str , UpperCamelCase__ : Optional[Any] ):
"""simple docstring"""
UpperCamelCase = set(self.all_special_ids ) # call it once instead of inside list comp
all_special_ids.remove(self.unk_token_id ) # <unk> is only sometimes special
if all_special_ids != set(range(len(self.additional_special_tokens ) + 3 ) ):
raise ValueError(
'There should be 3 special tokens: mask_token, pad_token, and eos_token +'
f""" {len(self.additional_special_tokens )} additional_special_tokens, but got {all_special_ids}""" )
return [1 if x in all_special_ids else 0 for x in seq]
def A ( self : Union[str, Any] , UpperCamelCase__ : List , UpperCamelCase__ : Optional[List] = None , UpperCamelCase__ : bool = False ):
"""simple docstring"""
if already_has_special_tokens:
return self._special_token_mask(UpperCamelCase__ )
elif token_ids_a is None:
return self._special_token_mask(UpperCamelCase__ ) + [1]
else:
return self._special_token_mask(token_ids_a + token_ids_a ) + [1]
def A ( self : int , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any]=None ):
"""simple docstring"""
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def A ( self : int , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
"""simple docstring"""
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
UpperCamelCase = os.path.join(
UpperCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ):
copyfile(self.vocab_file , UpperCamelCase__ )
return (out_vocab_file,)
| 324 |
'''simple docstring'''
def __lowerCamelCase ( A__ , A__ ) -> int:
"""simple docstring"""
while a != 0:
UpperCamelCase , UpperCamelCase = b % a, a
return b
def __lowerCamelCase ( A__ , A__ ) -> int:
"""simple docstring"""
if gcd(A__ , A__ ) != 1:
UpperCamelCase = F"""mod inverse of {a!r} and {m!r} does not exist"""
raise ValueError(A__ )
UpperCamelCase , UpperCamelCase , UpperCamelCase = 1, 0, a
UpperCamelCase , UpperCamelCase , UpperCamelCase = 0, 1, m
while va != 0:
UpperCamelCase = ua // va
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase = (ua - q * va), (ua - q * va), (ua - q * va), va, va, va
return ua % m
| 324 | 1 |
"""simple docstring"""
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase : int = logging.get_logger(__name__)
__lowercase : Optional[Any] = {
"""kakaobrain/align-base""": """https://huggingface.co/kakaobrain/align-base/resolve/main/config.json""",
}
class lowerCAmelCase ( a ):
"""simple docstring"""
__lowercase :Optional[int] = "align_text_model"
def __init__( self , UpperCamelCase__=30_522 , UpperCamelCase__=768 , UpperCamelCase__=12 , UpperCamelCase__=12 , UpperCamelCase__=3_072 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=512 , UpperCamelCase__=2 , UpperCamelCase__=0.02 , UpperCamelCase__=1e-12 , UpperCamelCase__=0 , UpperCamelCase__="absolute" , UpperCamelCase__=True , **UpperCamelCase__ , ) -> List[str]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
lowerCamelCase_ = vocab_size
lowerCamelCase_ = hidden_size
lowerCamelCase_ = num_hidden_layers
lowerCamelCase_ = num_attention_heads
lowerCamelCase_ = hidden_act
lowerCamelCase_ = intermediate_size
lowerCamelCase_ = hidden_dropout_prob
lowerCamelCase_ = attention_probs_dropout_prob
lowerCamelCase_ = max_position_embeddings
lowerCamelCase_ = type_vocab_size
lowerCamelCase_ = initializer_range
lowerCamelCase_ = layer_norm_eps
lowerCamelCase_ = position_embedding_type
lowerCamelCase_ = use_cache
lowerCamelCase_ = pad_token_id
@classmethod
def _lowerCAmelCase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the text config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
lowerCamelCase_ = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class lowerCAmelCase ( a ):
"""simple docstring"""
__lowercase :Union[str, Any] = "align_vision_model"
def __init__( self , UpperCamelCase__ = 3 , UpperCamelCase__ = 600 , UpperCamelCase__ = 2.0 , UpperCamelCase__ = 3.1 , UpperCamelCase__ = 8 , UpperCamelCase__ = [3, 3, 5, 3, 5, 5, 3] , UpperCamelCase__ = [32, 16, 24, 40, 80, 112, 192] , UpperCamelCase__ = [16, 24, 40, 80, 112, 192, 320] , UpperCamelCase__ = [] , UpperCamelCase__ = [1, 2, 2, 2, 1, 2, 1] , UpperCamelCase__ = [1, 2, 2, 3, 3, 4, 1] , UpperCamelCase__ = [1, 6, 6, 6, 6, 6, 6] , UpperCamelCase__ = 0.25 , UpperCamelCase__ = "swish" , UpperCamelCase__ = 2_560 , UpperCamelCase__ = "mean" , UpperCamelCase__ = 0.02 , UpperCamelCase__ = 0.001 , UpperCamelCase__ = 0.99 , UpperCamelCase__ = 0.2 , **UpperCamelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
lowerCamelCase_ = num_channels
lowerCamelCase_ = image_size
lowerCamelCase_ = width_coefficient
lowerCamelCase_ = depth_coefficient
lowerCamelCase_ = depth_divisor
lowerCamelCase_ = kernel_sizes
lowerCamelCase_ = in_channels
lowerCamelCase_ = out_channels
lowerCamelCase_ = depthwise_padding
lowerCamelCase_ = strides
lowerCamelCase_ = num_block_repeats
lowerCamelCase_ = expand_ratios
lowerCamelCase_ = squeeze_expansion_ratio
lowerCamelCase_ = hidden_act
lowerCamelCase_ = hidden_dim
lowerCamelCase_ = pooling_type
lowerCamelCase_ = initializer_range
lowerCamelCase_ = batch_norm_eps
lowerCamelCase_ = batch_norm_momentum
lowerCamelCase_ = drop_connect_rate
lowerCamelCase_ = sum(UpperCamelCase__ ) * 4
@classmethod
def _lowerCAmelCase ( cls , UpperCamelCase__ , **UpperCamelCase__ ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(UpperCamelCase__ )
lowerCamelCase_ , lowerCamelCase_ = cls.get_config_dict(UpperCamelCase__ , **UpperCamelCase__ )
# get the vision config dict if we are loading from AlignConfig
if config_dict.get('''model_type''' ) == "align":
lowerCamelCase_ = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCamelCase__ , **UpperCamelCase__ )
class lowerCAmelCase ( a ):
"""simple docstring"""
__lowercase :Optional[Any] = "align"
__lowercase :Union[str, Any] = True
def __init__( self , UpperCamelCase__=None , UpperCamelCase__=None , UpperCamelCase__=640 , UpperCamelCase__=1.0 , UpperCamelCase__=0.02 , **UpperCamelCase__ , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
if text_config is None:
lowerCamelCase_ = {}
logger.info('''text_config is None. Initializing the AlignTextConfig with default values.''' )
if vision_config is None:
lowerCamelCase_ = {}
logger.info('''vision_config is None. Initializing the AlignVisionConfig with default values.''' )
lowerCamelCase_ = AlignTextConfig(**UpperCamelCase__ )
lowerCamelCase_ = AlignVisionConfig(**UpperCamelCase__ )
lowerCamelCase_ = projection_dim
lowerCamelCase_ = temperature_init_value
lowerCamelCase_ = initializer_range
@classmethod
def _lowerCAmelCase ( cls , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCamelCase__ )
def _lowerCAmelCase ( self ) -> Optional[int]:
'''simple docstring'''
lowerCamelCase_ = copy.deepcopy(self.__dict__ )
lowerCamelCase_ = self.text_config.to_dict()
lowerCamelCase_ = self.vision_config.to_dict()
lowerCamelCase_ = self.__class__.model_type
return output | 142 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
__lowercase : Any = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def lowerCamelCase_ ( _lowerCamelCase : Optional[int] ):
if isinstance(_lowerCamelCase , torch.Tensor ):
return image
elif isinstance(_lowerCamelCase , PIL.Image.Image ):
lowerCamelCase_ = [image]
lowerCamelCase_ = [trans(img.convert('''RGB''' ) ) for img in image]
lowerCamelCase_ = torch.stack(_lowerCamelCase )
return image
class lowerCAmelCase ( a ):
"""simple docstring"""
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
lowerCamelCase_ = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCamelCase__ , scheduler=UpperCamelCase__ )
def _lowerCAmelCase ( self , UpperCamelCase__ ) -> Dict:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" )
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
lowerCamelCase_ = min(int(num_inference_steps * strength ) , UpperCamelCase__ )
lowerCamelCase_ = max(num_inference_steps - init_timestep , 0 )
lowerCamelCase_ = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowerCAmelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None ) -> List[str]:
'''simple docstring'''
if not isinstance(UpperCamelCase__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(UpperCamelCase__ )}""" )
lowerCamelCase_ = image.to(device=UpperCamelCase__ , dtype=UpperCamelCase__ )
if isinstance(UpperCamelCase__ , UpperCamelCase__ ) and len(UpperCamelCase__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(UpperCamelCase__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
lowerCamelCase_ = init_latents.shape
lowerCamelCase_ = randn_tensor(UpperCamelCase__ , generator=UpperCamelCase__ , device=UpperCamelCase__ , dtype=UpperCamelCase__ )
# get latents
print('''add noise to latents at timestep''' , UpperCamelCase__ )
lowerCamelCase_ = self.scheduler.add_noise(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase_ = init_latents
return latents
@torch.no_grad()
def __call__( self , UpperCamelCase__ = None , UpperCamelCase__ = 0.8 , UpperCamelCase__ = 1 , UpperCamelCase__ = None , UpperCamelCase__ = 0.0 , UpperCamelCase__ = 50 , UpperCamelCase__ = None , UpperCamelCase__ = "pil" , UpperCamelCase__ = True , ) -> Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(UpperCamelCase__ )
# 2. Preprocess image
lowerCamelCase_ = preprocess(UpperCamelCase__ )
# 3. set timesteps
self.scheduler.set_timesteps(UpperCamelCase__ , device=self.device )
lowerCamelCase_ , lowerCamelCase_ = self.get_timesteps(UpperCamelCase__ , UpperCamelCase__ , self.device )
lowerCamelCase_ = timesteps[:1].repeat(UpperCamelCase__ )
# 4. Prepare latent variables
lowerCamelCase_ = self.prepare_latents(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , self.unet.dtype , self.device , UpperCamelCase__ )
lowerCamelCase_ = latents
# 5. Denoising loop
for t in self.progress_bar(UpperCamelCase__ ):
# 1. predict noise model_output
lowerCamelCase_ = self.unet(UpperCamelCase__ , UpperCamelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
lowerCamelCase_ = self.scheduler.step(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , eta=UpperCamelCase__ , use_clipped_model_output=UpperCamelCase__ , generator=UpperCamelCase__ , ).prev_sample
lowerCamelCase_ = (image / 2 + 0.5).clamp(0 , 1 )
lowerCamelCase_ = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCamelCase_ = self.numpy_to_pil(UpperCamelCase__ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=UpperCamelCase__ ) | 142 | 1 |
class lowerCamelCase_ :
def __init__( self , __lowerCAmelCase ):
"""simple docstring"""
__magic_name__ :int = val
__magic_name__ :int = None
__magic_name__ :Optional[int] = None
def A ( self , __lowerCAmelCase ):
"""simple docstring"""
if self.val:
if val < self.val:
if self.left is None:
__magic_name__ :List[Any] = Node(__lowerCAmelCase )
else:
self.left.insert(__lowerCAmelCase )
elif val > self.val:
if self.right is None:
__magic_name__ :Any = Node(__lowerCAmelCase )
else:
self.right.insert(__lowerCAmelCase )
else:
__magic_name__ :Optional[Any] = val
def __lowercase ( snake_case, snake_case ):
"""simple docstring"""
if root:
inorder(root.left, snake_case )
res.append(root.val )
inorder(root.right, snake_case )
def __lowercase ( snake_case ):
"""simple docstring"""
if len(snake_case ) == 0:
return arr
__magic_name__ :Any = Node(arr[0] )
for i in range(1, len(snake_case ) ):
root.insert(arr[i] )
# Traverse BST in order.
__magic_name__ :int = []
inorder(snake_case, snake_case )
return res
if __name__ == "__main__":
print(tree_sort([10, 1, 3, 2, 9, 14, 13]))
| 180 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase_ ( unittest.TestCase ):
@property
def A ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__magic_name__ :Dict = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=('''DownBlock2D''', '''AttnDownBlock2D''') , up_block_types=('''AttnUpBlock2D''', '''UpBlock2D''') , )
return model
@property
def A ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__magic_name__ :List[str] = VQModel(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=3 , )
return model
@property
def A ( self ):
"""simple docstring"""
torch.manual_seed(0 )
__magic_name__ :int = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(__lowerCAmelCase )
def A ( self ):
"""simple docstring"""
__magic_name__ :Optional[Any] = self.dummy_uncond_unet
__magic_name__ :Optional[int] = DDIMScheduler()
__magic_name__ :List[str] = self.dummy_vq_model
__magic_name__ :Tuple = LDMPipeline(unet=__lowerCAmelCase , vqvae=__lowerCAmelCase , scheduler=__lowerCAmelCase )
ldm.to(__lowerCAmelCase )
ldm.set_progress_bar_config(disable=__lowerCAmelCase )
__magic_name__ :List[Any] = torch.manual_seed(0 )
__magic_name__ :List[str] = ldm(generator=__lowerCAmelCase , num_inference_steps=2 , output_type='''numpy''' ).images
__magic_name__ :List[Any] = torch.manual_seed(0 )
__magic_name__ :Any = ldm(generator=__lowerCAmelCase , num_inference_steps=2 , output_type='''numpy''' , return_dict=__lowerCAmelCase )[0]
__magic_name__ :Any = image[0, -3:, -3:, -1]
__magic_name__ :Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
__magic_name__ :Union[str, Any] = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172] )
__magic_name__ :Any = 1E-2 if torch_device != '''mps''' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class lowerCamelCase_ ( unittest.TestCase ):
def A ( self ):
"""simple docstring"""
__magic_name__ :Tuple = LDMPipeline.from_pretrained('''CompVis/ldm-celebahq-256''' )
ldm.to(__lowerCAmelCase )
ldm.set_progress_bar_config(disable=__lowerCAmelCase )
__magic_name__ :Optional[Any] = torch.manual_seed(0 )
__magic_name__ :Optional[int] = ldm(generator=__lowerCAmelCase , num_inference_steps=5 , output_type='''numpy''' ).images
__magic_name__ :Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
__magic_name__ :List[str] = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447] )
__magic_name__ :Tuple = 1E-2 if torch_device != '''mps''' else 3E-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
| 180 | 1 |
'''simple docstring'''
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase_ = logging.get_logger(__name__)
lowerCamelCase_ = {'''vocab_file''': '''vocab.txt'''}
lowerCamelCase_ = {
'''vocab_file''': {
'''openbmb/cpm-ant-10b''': '''https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt''',
},
}
lowerCamelCase_ = {
'''openbmb/cpm-ant-10b''': 10_24,
}
def __lowercase ( __lowercase ) -> str:
'''simple docstring'''
_A = collections.OrderedDict()
with open(__lowercase , "r" , encoding="utf-8" ) as reader:
_A = reader.readlines()
for index, token in enumerate(__lowercase ):
_A = token.rstrip("\n" )
_A = index
return vocab
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
def __init__( self : List[Any] , __UpperCAmelCase : Dict , __UpperCAmelCase : List[str]="<unk>" , __UpperCAmelCase : Optional[Any]=200 ):
'''simple docstring'''
_A = vocab
_A = unk_token
_A = max_input_chars_per_word
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : int ):
'''simple docstring'''
_A = list(__UpperCAmelCase )
if len(__UpperCAmelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
_A = 0
_A = []
while start < len(__UpperCAmelCase ):
_A = len(__UpperCAmelCase )
_A = None
while start < end:
_A = "".join(chars[start:end] )
if substr in self.vocab:
_A = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(__UpperCAmelCase )
_A = end
return sub_tokens
class _UpperCAmelCase ( snake_case_ ):
"""simple docstring"""
snake_case = VOCAB_FILES_NAMES
snake_case = PRETRAINED_VOCAB_FILES_MAP
snake_case = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case = ['''input_ids''', '''attention_mask''']
snake_case = False
def __init__( self : Tuple , __UpperCAmelCase : Any , __UpperCAmelCase : Union[str, Any]="<d>" , __UpperCAmelCase : Optional[Any]="</d>" , __UpperCAmelCase : Union[str, Any]="<s>" , __UpperCAmelCase : Dict="</s>" , __UpperCAmelCase : List[str]="<pad>" , __UpperCAmelCase : Any="<unk>" , __UpperCAmelCase : Optional[Any]="</n>" , __UpperCAmelCase : Tuple="</_>" , __UpperCAmelCase : Optional[int]="left" , **__UpperCAmelCase : Union[str, Any] , ):
'''simple docstring'''
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=__UpperCAmelCase , eod_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , line_token=__UpperCAmelCase , space_token=__UpperCAmelCase , padding_side=__UpperCAmelCase , **__UpperCAmelCase , )
_A = bod_token
_A = eod_token
_A = load_vocab(__UpperCAmelCase )
_A = self.encoder[space_token]
_A = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
_A = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __UpperCAmelCase : x[1] ) )
_A = {v: k for k, v in self.encoder.items()}
_A = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def lowerCAmelCase ( self : List[str] ):
'''simple docstring'''
return self.encoder[self.bod_token]
@property
def lowerCAmelCase ( self : Optional[Any] ):
'''simple docstring'''
return self.encoder[self.eod_token]
@property
def lowerCAmelCase ( self : str ):
'''simple docstring'''
return self.encoder["\n"]
@property
def lowerCAmelCase ( self : Dict ):
'''simple docstring'''
return len(self.encoder )
def lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : int ):
'''simple docstring'''
_A = []
for x in jieba.cut(__UpperCAmelCase , cut_all=__UpperCAmelCase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(__UpperCAmelCase ) )
return output_tokens
def lowerCAmelCase ( self : Any , __UpperCAmelCase : List[str] , **__UpperCAmelCase : str ):
'''simple docstring'''
_A = [i for i in token_ids if i >= 0]
_A = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(__UpperCAmelCase , **__UpperCAmelCase )
def lowerCAmelCase ( self : Any , __UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
return token in self.encoder
def lowerCAmelCase ( self : int , __UpperCAmelCase : List[str] ):
'''simple docstring'''
return "".join(__UpperCAmelCase )
def lowerCAmelCase ( self : Optional[Any] , __UpperCAmelCase : Dict ):
'''simple docstring'''
return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) )
def lowerCAmelCase ( self : str , __UpperCAmelCase : Union[str, Any] ):
'''simple docstring'''
return self.decoder.get(__UpperCAmelCase , self.unk_token )
def lowerCAmelCase ( self : List[str] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ):
'''simple docstring'''
if os.path.isdir(__UpperCAmelCase ):
_A = os.path.join(
__UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
_A = (filename_prefix + "-" if filename_prefix else "") + save_directory
_A = 0
if " " in self.encoder:
_A = self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
_A = self.encoder["\n"]
del self.encoder["\n"]
_A = collections.OrderedDict(sorted(self.encoder.items() , key=lambda __UpperCAmelCase : x[1] ) )
with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
_A = token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def lowerCAmelCase ( self : Optional[int] , __UpperCAmelCase : List[int] , __UpperCAmelCase : List[int] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def lowerCAmelCase ( self : Dict , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None , __UpperCAmelCase : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCAmelCase , token_ids_a=__UpperCAmelCase , already_has_special_tokens=__UpperCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCAmelCase )) + [1] + ([0] * len(__UpperCAmelCase ))
return [1] + ([0] * len(__UpperCAmelCase ))
| 330 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_botoa_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_botoa_available():
import botoa # noqa: F401
def __lowercase ( __lowercase ) -> Tuple:
'''simple docstring'''
_A = botoa.client("iam" )
_A = {
"Version": "2012-10-17",
"Statement": [
{"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
],
}
try:
# create the role, associated with the chosen trust policy
iam_client.create_role(
RoleName=__lowercase , AssumeRolePolicyDocument=json.dumps(__lowercase , indent=2 ) )
_A = {
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"sagemaker:*",
"ecr:GetDownloadUrlForLayer",
"ecr:BatchGetImage",
"ecr:BatchCheckLayerAvailability",
"ecr:GetAuthorizationToken",
"cloudwatch:PutMetricData",
"cloudwatch:GetMetricData",
"cloudwatch:GetMetricStatistics",
"cloudwatch:ListMetrics",
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
"logs:GetLogEvents",
"s3:CreateBucket",
"s3:ListBucket",
"s3:GetBucketLocation",
"s3:GetObject",
"s3:PutObject",
],
"Resource": "*",
}
],
}
# attach policy to role
iam_client.put_role_policy(
RoleName=__lowercase , PolicyName=F'''{role_name}_policy_permission''' , PolicyDocument=json.dumps(__lowercase , indent=2 ) , )
except iam_client.exceptions.EntityAlreadyExistsException:
print(F'''role {role_name} already exists. Using existing one''' )
def __lowercase ( __lowercase ) -> str:
'''simple docstring'''
_A = botoa.client("iam" )
return iam_client.get_role(RoleName=__lowercase )["Role"]["Arn"]
def __lowercase ( ) -> List[str]:
'''simple docstring'''
_A = _ask_options(
"How do you want to authorize?" , ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "] , __lowercase , )
_A = None
if credentials_configuration == 0:
_A = _ask_field("Enter your AWS Profile name: [default] " , default="default" )
_A = aws_profile
else:
print(
"Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
"`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`" )
_A = _ask_field("AWS Access Key ID: " )
_A = aws_access_key_id
_A = _ask_field("AWS Secret Access Key: " )
_A = aws_secret_access_key
_A = _ask_field("Enter your AWS Region: [us-east-1]" , default="us-east-1" )
_A = aws_region
_A = _ask_options(
"Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?" , ["Provide IAM Role name", "Create new IAM role using credentials"] , __lowercase , )
if role_management == 0:
_A = _ask_field("Enter your IAM role name: " )
else:
_A = "accelerate_sagemaker_execution_role"
print(F'''Accelerate will create an iam role "{iam_role_name}" using the provided credentials''' )
_create_iam_role_for_sagemaker(__lowercase )
_A = _ask_field(
"Do you want to use custom Docker image? [yes/NO]: " , _convert_yes_no_to_bool , default=__lowercase , error_message="Please enter yes or no." , )
_A = None
if is_custom_docker_image:
_A = _ask_field("Enter your Docker image: " , lambda __lowercase : str(__lowercase ).lower() )
_A = _ask_field(
"Do you want to provide SageMaker input channels with data locations? [yes/NO]: " , _convert_yes_no_to_bool , default=__lowercase , error_message="Please enter yes or no." , )
_A = None
if is_sagemaker_inputs_enabled:
_A = _ask_field(
"Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): " , lambda __lowercase : str(__lowercase ).lower() , )
_A = _ask_field(
"Do you want to enable SageMaker metrics? [yes/NO]: " , _convert_yes_no_to_bool , default=__lowercase , error_message="Please enter yes or no." , )
_A = None
if is_sagemaker_metrics_enabled:
_A = _ask_field(
"Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): " , lambda __lowercase : str(__lowercase ).lower() , )
_A = _ask_options(
"What is the distributed mode?" , ["No distributed training", "Data parallelism"] , _convert_sagemaker_distributed_mode , )
_A = {}
_A = _ask_field(
"Do you wish to optimize your script with torch dynamo?[yes/NO]:" , _convert_yes_no_to_bool , default=__lowercase , error_message="Please enter yes or no." , )
if use_dynamo:
_A = "dynamo_"
_A = _ask_options(
"Which dynamo backend would you like to use?" , [x.lower() for x in DYNAMO_BACKENDS] , _convert_dynamo_backend , default=2 , )
_A = _ask_field(
"Do you want to customize the defaults sent to torch.compile? [yes/NO]: " , _convert_yes_no_to_bool , default=__lowercase , error_message="Please enter yes or no." , )
if use_custom_options:
_A = _ask_options(
"Which mode do you want to use?" , __lowercase , lambda __lowercase : TORCH_DYNAMO_MODES[int(__lowercase )] , default="default" , )
_A = _ask_field(
"Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: " , _convert_yes_no_to_bool , default=__lowercase , error_message="Please enter yes or no." , )
_A = _ask_field(
"Do you want to enable dynamic shape tracing? [yes/NO]: " , _convert_yes_no_to_bool , default=__lowercase , error_message="Please enter yes or no." , )
_A = "Which EC2 instance type you want to use for your training?"
if distributed_type != SageMakerDistributedType.NO:
_A = _ask_options(
__lowercase , __lowercase , lambda __lowercase : SAGEMAKER_PARALLEL_EC2_INSTANCES[int(__lowercase )] )
else:
eca_instance_query += "? [ml.p3.2xlarge]:"
_A = _ask_field(__lowercase , lambda __lowercase : str(__lowercase ).lower() , default="ml.p3.2xlarge" )
_A = 1
if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
_A = _ask_field(
"How many machines do you want use? [1]: " , __lowercase , default=1 , )
_A = _ask_options(
"Do you wish to use FP16 or BF16 (mixed precision)?" , ["no", "fp16", "bf16", "fp8"] , _convert_mixed_precision , )
if use_dynamo and mixed_precision == "no":
print(
"Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts." )
return SageMakerConfig(
image_uri=__lowercase , compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER , distributed_type=__lowercase , use_cpu=__lowercase , dynamo_config=__lowercase , eca_instance_type=__lowercase , profile=__lowercase , region=__lowercase , iam_role_name=__lowercase , mixed_precision=__lowercase , num_machines=__lowercase , sagemaker_inputs_file=__lowercase , sagemaker_metrics_file=__lowercase , )
| 330 | 1 |
'''simple docstring'''
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class UpperCAmelCase_ ( snake_case__ ):
UpperCAmelCase_ = (DPMSolverSDEScheduler,)
UpperCAmelCase_ = 1_0
def snake_case__ ( self , **lowercase_):
snake_case_ : Dict = {
"num_train_timesteps": 11_00,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"noise_sampler_seed": 0,
}
config.update(**lowercase_)
return config
def snake_case__ ( self):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=lowercase_)
def snake_case__ ( self):
for beta_start, beta_end in zip([0.00_001, 0.0_001, 0.001] , [0.0_002, 0.002, 0.02]):
self.check_over_configs(beta_start=lowercase_ , beta_end=lowercase_)
def snake_case__ ( self):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=lowercase_)
def snake_case__ ( self):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=lowercase_)
def snake_case__ ( self):
snake_case_ : str = self.scheduler_classes[0]
snake_case_ : Optional[int] = self.get_scheduler_config()
snake_case_ : Union[str, Any] = scheduler_class(**lowercase_)
scheduler.set_timesteps(self.num_inference_steps)
snake_case_ : Union[str, Any] = self.dummy_model()
snake_case_ : Optional[Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case_ : Tuple = sample.to(lowercase_)
for i, t in enumerate(scheduler.timesteps):
snake_case_ : List[Any] = scheduler.scale_model_input(lowercase_ , lowercase_)
snake_case_ : List[str] = model(lowercase_ , lowercase_)
snake_case_ : Dict = scheduler.step(lowercase_ , lowercase_ , lowercase_)
snake_case_ : Union[str, Any] = output.prev_sample
snake_case_ : List[Any] = torch.sum(torch.abs(lowercase_))
snake_case_ : Optional[int] = torch.mean(torch.abs(lowercase_))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.47_821_044_921_875) < 1E-2
assert abs(result_mean.item() - 0.2_178_705_964_565_277) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_352_111_816_406) < 1E-2
assert abs(result_mean.item() - 0.22_342_906_892_299_652) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326) < 1E-3
def snake_case__ ( self):
snake_case_ : Any = self.scheduler_classes[0]
snake_case_ : Optional[int] = self.get_scheduler_config(prediction_type="v_prediction")
snake_case_ : Any = scheduler_class(**lowercase_)
scheduler.set_timesteps(self.num_inference_steps)
snake_case_ : Any = self.dummy_model()
snake_case_ : Union[str, Any] = self.dummy_sample_deter * scheduler.init_noise_sigma
snake_case_ : Optional[int] = sample.to(lowercase_)
for i, t in enumerate(scheduler.timesteps):
snake_case_ : List[Any] = scheduler.scale_model_input(lowercase_ , lowercase_)
snake_case_ : List[str] = model(lowercase_ , lowercase_)
snake_case_ : str = scheduler.step(lowercase_ , lowercase_ , lowercase_)
snake_case_ : Union[str, Any] = output.prev_sample
snake_case_ : Any = torch.sum(torch.abs(lowercase_))
snake_case_ : Optional[int] = torch.mean(torch.abs(lowercase_))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 124.77_149_200_439_453) < 1E-2
assert abs(result_mean.item() - 0.16_226_289_014_816_284) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 128.1_663_360_595_703) < 1E-2
assert abs(result_mean.item() - 0.16_688_326_001_167_297) < 1E-3
else:
assert abs(result_sum.item() - 119.8_487_548_828_125) < 1E-2
assert abs(result_mean.item() - 0.1_560_530_662_536_621) < 1E-3
def snake_case__ ( self):
snake_case_ : int = self.scheduler_classes[0]
snake_case_ : Union[str, Any] = self.get_scheduler_config()
snake_case_ : List[str] = scheduler_class(**lowercase_)
scheduler.set_timesteps(self.num_inference_steps , device=lowercase_)
snake_case_ : List[Any] = self.dummy_model()
snake_case_ : Optional[Any] = self.dummy_sample_deter.to(lowercase_) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
snake_case_ : str = scheduler.scale_model_input(lowercase_ , lowercase_)
snake_case_ : Any = model(lowercase_ , lowercase_)
snake_case_ : Optional[int] = scheduler.step(lowercase_ , lowercase_ , lowercase_)
snake_case_ : Optional[Any] = output.prev_sample
snake_case_ : List[str] = torch.sum(torch.abs(lowercase_))
snake_case_ : Optional[int] = torch.mean(torch.abs(lowercase_))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 167.46_957_397_460_938) < 1E-2
assert abs(result_mean.item() - 0.21_805_934_607_982_635) < 1E-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 171.59_353_637_695_312) < 1E-2
assert abs(result_mean.item() - 0.22_342_908_382_415_771) < 1E-3
else:
assert abs(result_sum.item() - 162.52_383_422_851_562) < 1E-2
assert abs(result_mean.item() - 0.211_619_570_851_326) < 1E-3
def snake_case__ ( self):
snake_case_ : Tuple = self.scheduler_classes[0]
snake_case_ : List[str] = self.get_scheduler_config()
snake_case_ : int = scheduler_class(**lowercase_ , use_karras_sigmas=lowercase_)
scheduler.set_timesteps(self.num_inference_steps , device=lowercase_)
snake_case_ : Optional[int] = self.dummy_model()
snake_case_ : List[str] = self.dummy_sample_deter.to(lowercase_) * scheduler.init_noise_sigma
snake_case_ : str = sample.to(lowercase_)
for t in scheduler.timesteps:
snake_case_ : int = scheduler.scale_model_input(lowercase_ , lowercase_)
snake_case_ : int = model(lowercase_ , lowercase_)
snake_case_ : Any = scheduler.step(lowercase_ , lowercase_ , lowercase_)
snake_case_ : int = output.prev_sample
snake_case_ : Optional[int] = torch.sum(torch.abs(lowercase_))
snake_case_ : List[str] = torch.mean(torch.abs(lowercase_))
if torch_device in ["mps"]:
assert abs(result_sum.item() - 176.66_974_135_742_188) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811) < 1E-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 177.63_653_564_453_125) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811) < 1E-2
else:
assert abs(result_sum.item() - 170.3_135_223_388_672) < 1E-2
assert abs(result_mean.item() - 0.23_003_872_730_981_811) < 1E-2
| 92 |
'''simple docstring'''
import argparse
from typing import List
import evaluate
import numpy as np
import torch
from datasets import DatasetDict, load_dataset
# New Code #
# We'll be using StratifiedKFold for this example
from sklearn.model_selection import StratifiedKFold
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to perform Cross Validation,
# and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ = 16
a_ = 32
def UpperCamelCase_ ( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE = 1_6 ):
"""simple docstring"""
snake_case_ : Union[str, Any] = AutoTokenizer.from_pretrained("bert-base-cased" )
snake_case_ : int = DatasetDict(
{
"train": dataset["train"].select(__SCREAMING_SNAKE_CASE ),
"validation": dataset["train"].select(__SCREAMING_SNAKE_CASE ),
"test": dataset["validation"],
} )
def tokenize_function(__SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
snake_case_ : str = tokenizer(examples["sentence1"], examples["sentence2"], truncation=__SCREAMING_SNAKE_CASE, max_length=__SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
snake_case_ : List[Any] = datasets.map(
__SCREAMING_SNAKE_CASE, batched=__SCREAMING_SNAKE_CASE, remove_columns=["idx", "sentence1", "sentence2"], )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
snake_case_ : Dict = tokenized_datasets.rename_column("label", "labels" )
def collate_fn(__SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
snake_case_ : Dict = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
snake_case_ : Optional[int] = 1_6
elif accelerator.mixed_precision != "no":
snake_case_ : Tuple = 8
else:
snake_case_ : Union[str, Any] = None
return tokenizer.pad(
__SCREAMING_SNAKE_CASE, padding="longest", max_length=__SCREAMING_SNAKE_CASE, pad_to_multiple_of=__SCREAMING_SNAKE_CASE, return_tensors="pt", )
# Instantiate dataloaders.
snake_case_ : Optional[int] = DataLoader(
tokenized_datasets["train"], shuffle=__SCREAMING_SNAKE_CASE, collate_fn=__SCREAMING_SNAKE_CASE, batch_size=__SCREAMING_SNAKE_CASE )
snake_case_ : int = DataLoader(
tokenized_datasets["validation"], shuffle=__SCREAMING_SNAKE_CASE, collate_fn=__SCREAMING_SNAKE_CASE, batch_size=__SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = DataLoader(
tokenized_datasets["test"], shuffle=__SCREAMING_SNAKE_CASE, collate_fn=__SCREAMING_SNAKE_CASE, batch_size=__SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader, test_dataloader
def UpperCamelCase_ ( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case_ : Optional[Any] = []
# Download the dataset
snake_case_ : Tuple = load_dataset("glue", "mrpc" )
# Create our splits
snake_case_ : Union[str, Any] = StratifiedKFold(n_splits=int(args.num_folds ) )
# Initialize accelerator
snake_case_ : Optional[Any] = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case_ : Optional[Any] = config["lr"]
snake_case_ : str = int(config["num_epochs"] )
snake_case_ : Tuple = int(config["seed"] )
snake_case_ : Optional[Any] = int(config["batch_size"] )
snake_case_ : List[Any] = evaluate.load("glue", "mrpc" )
# If the batch size is too big we use gradient accumulation
snake_case_ : List[str] = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case_ : Union[str, Any] = batch_size // MAX_GPU_BATCH_SIZE
snake_case_ : Union[str, Any] = MAX_GPU_BATCH_SIZE
set_seed(__SCREAMING_SNAKE_CASE )
# New Code #
# Create our folds:
snake_case_ : int = kfold.split(np.zeros(datasets["train"].num_rows ), datasets["train"]["label"] )
snake_case_ : Optional[int] = []
# Iterate over them
for i, (train_idxs, valid_idxs) in enumerate(__SCREAMING_SNAKE_CASE ):
snake_case_ , snake_case_ , snake_case_ : Tuple = get_fold_dataloaders(
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case_ : Optional[int] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=__SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case_ : Union[str, Any] = model.to(accelerator.device )
# Instantiate optimizer
snake_case_ : Any = AdamW(params=model.parameters(), lr=__SCREAMING_SNAKE_CASE )
# Instantiate scheduler
snake_case_ : List[str] = get_linear_schedule_with_warmup(
optimizer=__SCREAMING_SNAKE_CASE, num_warmup_steps=1_0_0, num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) // gradient_accumulation_steps, )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ : str = accelerator.prepare(
__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(__SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case_ : List[Any] = model(**__SCREAMING_SNAKE_CASE )
snake_case_ : Union[str, Any] = outputs.loss
snake_case_ : Union[str, Any] = loss / gradient_accumulation_steps
accelerator.backward(__SCREAMING_SNAKE_CASE )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ : Dict = model(**__SCREAMING_SNAKE_CASE )
snake_case_ : str = outputs.logits.argmax(dim=-1 )
snake_case_ , snake_case_ : Dict = accelerator.gather_for_metrics((predictions, batch["labels"]) )
metric.add_batch(
predictions=__SCREAMING_SNAKE_CASE, references=__SCREAMING_SNAKE_CASE, )
snake_case_ : str = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'epoch {epoch}:', __SCREAMING_SNAKE_CASE )
# New Code #
# We also run predictions on the test set at the very end
snake_case_ : Any = []
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case_ : int = model(**__SCREAMING_SNAKE_CASE )
snake_case_ : Tuple = outputs.logits
snake_case_ , snake_case_ : List[Any] = accelerator.gather_for_metrics((predictions, batch["labels"]) )
fold_predictions.append(predictions.cpu() )
if i == 0:
# We need all of the test predictions
test_references.append(references.cpu() )
# Use accelerator.print to print only on the main process.
test_predictions.append(torch.cat(__SCREAMING_SNAKE_CASE, dim=0 ) )
# We now need to release all our memory and get rid of the current model, optimizer, etc
accelerator.free_memory()
# New Code #
# Finally we check the accuracy of our folded results:
snake_case_ : List[Any] = torch.cat(__SCREAMING_SNAKE_CASE, dim=0 )
snake_case_ : Any = torch.stack(__SCREAMING_SNAKE_CASE, dim=0 ).sum(dim=0 ).div(int(args.num_folds ) ).argmax(dim=-1 )
snake_case_ : Tuple = metric.compute(predictions=__SCREAMING_SNAKE_CASE, references=__SCREAMING_SNAKE_CASE )
accelerator.print("Average test metrics from all folds:", __SCREAMING_SNAKE_CASE )
def UpperCamelCase_ ( ):
"""simple docstring"""
snake_case_ : Tuple = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision", type=__SCREAMING_SNAKE_CASE, default=__SCREAMING_SNAKE_CASE, choices=["no", "fp16", "bf16", "fp8"], help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU.", )
parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU." )
# New Code #
parser.add_argument("--num_folds", type=__SCREAMING_SNAKE_CASE, default=3, help="The number of splits to perform across the dataset" )
snake_case_ : List[Any] = parser.parse_args()
snake_case_ : Optional[Any] = {"lr": 2E-5, "num_epochs": 3, "seed": 4_2, "batch_size": 1_6}
training_function(__SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
| 92 | 1 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = """"""
SCREAMING_SNAKE_CASE_ : Optional[Any] = """hf-legacy""" # "hf://"" is reserved for hffs
def __init__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Optional[int]:
super().__init__(self , **lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = repo_info
SCREAMING_SNAKE_CASE = token
SCREAMING_SNAKE_CASE = None
def __A ( self ) -> Any:
if self.dir_cache is None:
SCREAMING_SNAKE_CASE = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
SCREAMING_SNAKE_CASE = {
'name': hf_file.rfilename,
'size': None,
'type': 'file',
}
self.dir_cache.update(
{
str(lowerCAmelCase__ ): {'name': str(lowerCAmelCase__ ), 'size': None, 'type': 'directory'}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__ = "rb" , **lowerCAmelCase__ , ) -> Any:
if not isinstance(self.repo_info , lowerCAmelCase__ ):
raise NotImplementedError(F'Open is only implemented for dataset repositories, but got {self.repo_info}' )
SCREAMING_SNAKE_CASE = hf_hub_url(self.repo_info.id , lowerCAmelCase__ , revision=self.repo_info.sha )
return fsspec.open(
lowerCAmelCase__ , mode=lowerCAmelCase__ , headers=get_authentication_headers_for_url(lowerCAmelCase__ , use_auth_token=self.token ) , client_kwargs={'trust_env': True} , ).open()
def __A ( self , lowerCAmelCase__ , **lowerCAmelCase__ ) -> Optional[Any]:
self._get_dirs()
SCREAMING_SNAKE_CASE = self._strip_protocol(lowerCAmelCase__ )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(lowerCAmelCase__ )
def __A ( self , lowerCAmelCase__ , lowerCAmelCase__=False , **lowerCAmelCase__ ) -> Optional[int]:
self._get_dirs()
SCREAMING_SNAKE_CASE = PurePosixPath(path.strip('/' ) )
SCREAMING_SNAKE_CASE = {}
for p, f in self.dir_cache.items():
SCREAMING_SNAKE_CASE = PurePosixPath(p.strip('/' ) )
SCREAMING_SNAKE_CASE = p.parent
if root == path:
SCREAMING_SNAKE_CASE = f
SCREAMING_SNAKE_CASE = list(paths.values() )
if detail:
return out
else:
return sorted(f['name'] for f in out )
| 247 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowerCAmelCase ( lowerCamelCase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Dict = ["""image_processor""", """tokenizer"""]
SCREAMING_SNAKE_CASE_ : Tuple = """FlavaImageProcessor"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self , lowerCAmelCase__=None , lowerCAmelCase__=None , **lowerCAmelCase__ ) -> int:
SCREAMING_SNAKE_CASE = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , lowerCAmelCase__ , )
SCREAMING_SNAKE_CASE = kwargs.pop('feature_extractor' )
SCREAMING_SNAKE_CASE = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE = self.image_processor
def __call__( self , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = True , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = None , lowerCAmelCase__ = 0 , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = None , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = False , lowerCAmelCase__ = True , lowerCAmelCase__ = None , **lowerCAmelCase__ , ) -> Any:
if text is None and images is None:
raise ValueError('You have to specify either text or images. Both cannot be none.' )
if text is not None:
SCREAMING_SNAKE_CASE = self.tokenizer(
text=lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , padding=lowerCAmelCase__ , truncation=lowerCAmelCase__ , max_length=lowerCAmelCase__ , stride=lowerCAmelCase__ , pad_to_multiple_of=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_overflowing_tokens=lowerCAmelCase__ , return_special_tokens_mask=lowerCAmelCase__ , return_offsets_mapping=lowerCAmelCase__ , return_length=lowerCAmelCase__ , verbose=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
if images is not None:
SCREAMING_SNAKE_CASE = self.image_processor(
lowerCAmelCase__ , return_image_mask=lowerCAmelCase__ , return_codebook_pixels=lowerCAmelCase__ , return_tensors=lowerCAmelCase__ , **lowerCAmelCase__ , )
if text is not None and images is not None:
encoding.update(lowerCAmelCase__ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**lowerCAmelCase__ ) , tensor_type=lowerCAmelCase__ )
def __A ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Union[str, Any]:
return self.tokenizer.batch_decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
def __A ( self , *lowerCAmelCase__ , **lowerCAmelCase__ ) -> Tuple:
return self.tokenizer.decode(*lowerCAmelCase__ , **lowerCAmelCase__ )
@property
def __A ( self ) -> Tuple:
SCREAMING_SNAKE_CASE = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def __A ( self ) -> Union[str, Any]:
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , lowerCAmelCase__ , )
return self.image_processor_class
@property
def __A ( self ) -> str:
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , lowerCAmelCase__ , )
return self.image_processor
| 247 | 1 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from importlib import import_module
from typing import Dict, List, Optional, Tuple
import numpy as np
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch import nn
from utils_ner import Split, TokenClassificationDataset, TokenClassificationTask
import transformers
from transformers import (
AutoConfig,
AutoModelForTokenClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
__A = logging.getLogger(__name__)
@dataclass
class __snake_case :
"""simple docstring"""
lowercase = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
lowercase = field(
default=UpperCAmelCase_ ,metadata={'help': 'Pretrained config name or path if not the same as model_name'})
lowercase = field(
default='NER' ,metadata={'help': 'Task type to fine tune in training (e.g. NER, POS, etc)'})
lowercase = field(
default=UpperCAmelCase_ ,metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
lowercase = field(default=UpperCAmelCase_ ,metadata={'help': 'Set this flag to use fast tokenization.'})
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
lowercase = field(
default=UpperCAmelCase_ ,metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} ,)
@dataclass
class __snake_case :
"""simple docstring"""
lowercase = field(
metadata={'help': 'The input data dir. Should contain the .txt files for a CoNLL-2003-formatted task.'})
lowercase = field(
default=UpperCAmelCase_ ,metadata={'help': 'Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.'} ,)
lowercase = field(
default=1_28 ,metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} ,)
lowercase = field(
default=UpperCAmelCase_ ,metadata={'help': 'Overwrite the cached training and evaluation sets'})
def UpperCamelCase_ ( ):
'''simple docstring'''
lowerCAmelCase_ : str = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : int = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : Union[str, Any] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
""" --overwrite_output_dir to overcome.""" )
lowerCAmelCase_ : List[str] = import_module("""tasks""" )
try:
lowerCAmelCase_ : Tuple = getattr(UpperCamelCase__ , model_args.task_type )
lowerCAmelCase_ : Dict = token_classification_task_clazz()
except AttributeError:
raise ValueError(
f'Task {model_args.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
f'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCamelCase__ )
# Set seed
set_seed(training_args.seed )
# Prepare CONLL-2003 task
lowerCAmelCase_ : Union[str, Any] = token_classification_task.get_labels(data_args.labels )
lowerCAmelCase_ : str = dict(enumerate(UpperCamelCase__ ) )
lowerCAmelCase_ : Any = len(UpperCamelCase__ )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
lowerCAmelCase_ : Optional[Any] = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=UpperCamelCase__ , idalabel=UpperCamelCase__ , labelaid={label: i for i, label in enumerate(UpperCamelCase__ )} , cache_dir=model_args.cache_dir , )
lowerCAmelCase_ : List[Any] = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast , )
lowerCAmelCase_ : List[Any] = AutoModelForTokenClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCamelCase__ , cache_dir=model_args.cache_dir , )
# Get datasets
lowerCAmelCase_ : Optional[int] = (
TokenClassificationDataset(
token_classification_task=UpperCamelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCamelCase__ , labels=UpperCamelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.train , )
if training_args.do_train
else None
)
lowerCAmelCase_ : Dict = (
TokenClassificationDataset(
token_classification_task=UpperCamelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCamelCase__ , labels=UpperCamelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.dev , )
if training_args.do_eval
else None
)
def align_predictions(A__ : Optional[int] , A__ : Any ) -> Tuple[List[int], List[int]]:
lowerCAmelCase_ : Union[str, Any] = np.argmax(UpperCamelCase__ , axis=2 )
lowerCAmelCase_, lowerCAmelCase_ : List[Any] = preds.shape
lowerCAmelCase_ : List[Any] = [[] for _ in range(UpperCamelCase__ )]
lowerCAmelCase_ : Any = [[] for _ in range(UpperCamelCase__ )]
for i in range(UpperCamelCase__ ):
for j in range(UpperCamelCase__ ):
if label_ids[i, j] != nn.CrossEntropyLoss().ignore_index:
out_label_list[i].append(label_map[label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
return preds_list, out_label_list
def compute_metrics(A__ : Optional[int] ) -> Dict:
lowerCAmelCase_, lowerCAmelCase_ : List[str] = align_predictions(p.predictions , p.label_ids )
return {
"accuracy_score": accuracy_score(UpperCamelCase__ , UpperCamelCase__ ),
"precision": precision_score(UpperCamelCase__ , UpperCamelCase__ ),
"recall": recall_score(UpperCamelCase__ , UpperCamelCase__ ),
"f1": fa_score(UpperCamelCase__ , UpperCamelCase__ ),
}
# Data collator
lowerCAmelCase_ : Any = DataCollatorWithPadding(UpperCamelCase__ , pad_to_multiple_of=8 ) if training_args.fpaa else None
# Initialize our Trainer
lowerCAmelCase_ : int = Trainer(
model=UpperCamelCase__ , args=UpperCamelCase__ , train_dataset=UpperCamelCase__ , eval_dataset=UpperCamelCase__ , compute_metrics=UpperCamelCase__ , data_collator=UpperCamelCase__ , )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_process_zero():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
lowerCAmelCase_ : str = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
lowerCAmelCase_ : List[str] = trainer.evaluate()
lowerCAmelCase_ : Optional[Any] = os.path.join(training_args.output_dir , """eval_results.txt""" )
if trainer.is_world_process_zero():
with open(UpperCamelCase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(""" %s = %s""" , UpperCamelCase__ , UpperCamelCase__ )
writer.write("""%s = %s\n""" % (key, value) )
results.update(UpperCamelCase__ )
# Predict
if training_args.do_predict:
lowerCAmelCase_ : Optional[int] = TokenClassificationDataset(
token_classification_task=UpperCamelCase__ , data_dir=data_args.data_dir , tokenizer=UpperCamelCase__ , labels=UpperCamelCase__ , model_type=config.model_type , max_seq_length=data_args.max_seq_length , overwrite_cache=data_args.overwrite_cache , mode=Split.test , )
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : Optional[int] = trainer.predict(UpperCamelCase__ )
lowerCAmelCase_, lowerCAmelCase_ : str = align_predictions(UpperCamelCase__ , UpperCamelCase__ )
lowerCAmelCase_ : Optional[int] = os.path.join(training_args.output_dir , """test_results.txt""" )
if trainer.is_world_process_zero():
with open(UpperCamelCase__ , """w""" ) as writer:
for key, value in metrics.items():
logger.info(""" %s = %s""" , UpperCamelCase__ , UpperCamelCase__ )
writer.write("""%s = %s\n""" % (key, value) )
# Save predictions
lowerCAmelCase_ : Optional[int] = os.path.join(training_args.output_dir , """test_predictions.txt""" )
if trainer.is_world_process_zero():
with open(UpperCamelCase__ , """w""" ) as writer:
with open(os.path.join(data_args.data_dir , """test.txt""" ) , """r""" ) as f:
token_classification_task.write_predictions_to_file(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
return results
def UpperCamelCase_ ( A__ : Tuple ):
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 703 |
'''simple docstring'''
class __snake_case :
"""simple docstring"""
def __init__( self : Optional[Any] ) -> str:
lowerCAmelCase_ : str = 0
lowerCAmelCase_ : Optional[int] = 0
lowerCAmelCase_ : Optional[Any] = {}
def __lowercase ( self : Union[str, Any] , lowerCamelCase : Any ) -> Optional[Any]:
if vertex not in self.adjacency:
lowerCAmelCase_ : List[Any] = {}
self.num_vertices += 1
def __lowercase ( self : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : Optional[Any] ) -> Dict:
self.add_vertex(lowerCamelCase )
self.add_vertex(lowerCamelCase )
if head == tail:
return
lowerCAmelCase_ : List[Any] = weight
lowerCAmelCase_ : Tuple = weight
def __lowercase ( self : Union[str, Any] ) -> Optional[Any]:
lowerCAmelCase_ : Any = self.get_edges()
for edge in edges:
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : Optional[int] = edge
edges.remove((tail, head, weight) )
for i in range(len(lowerCamelCase ) ):
lowerCAmelCase_ : List[str] = list(edges[i] )
edges.sort(key=lambda lowerCamelCase : e[2] )
for i in range(len(lowerCamelCase ) - 1 ):
if edges[i][2] >= edges[i + 1][2]:
lowerCAmelCase_ : Any = edges[i][2] + 1
for edge in edges:
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : List[str] = edge
lowerCAmelCase_ : Optional[int] = weight
lowerCAmelCase_ : Optional[int] = weight
def __str__( self : Optional[Any] ) -> Any:
lowerCAmelCase_ : List[str] = """"""
for tail in self.adjacency:
for head in self.adjacency[tail]:
lowerCAmelCase_ : List[Any] = self.adjacency[head][tail]
string += F'{head} -> {tail} == {weight}\n'
return string.rstrip("""\n""" )
def __lowercase ( self : Tuple ) -> Dict:
lowerCAmelCase_ : Optional[Any] = []
for tail in self.adjacency:
for head in self.adjacency[tail]:
output.append((tail, head, self.adjacency[head][tail]) )
return output
def __lowercase ( self : int ) -> int:
return self.adjacency.keys()
@staticmethod
def __lowercase ( lowerCamelCase : Tuple=None , lowerCamelCase : Union[str, Any]=None ) -> Any:
lowerCAmelCase_ : str = Graph()
if vertices is None:
lowerCAmelCase_ : Dict = []
if edges is None:
lowerCAmelCase_ : List[str] = []
for vertex in vertices:
g.add_vertex(lowerCamelCase )
for edge in edges:
g.add_edge(*lowerCamelCase )
return g
class __snake_case :
"""simple docstring"""
def __init__( self : str ) -> List[Any]:
lowerCAmelCase_ : Union[str, Any] = {}
lowerCAmelCase_ : Optional[Any] = {}
def __len__( self : Optional[Any] ) -> int:
return len(self.parent )
def __lowercase ( self : Optional[Any] , lowerCamelCase : Optional[Any] ) -> List[str]:
if item in self.parent:
return self.find(lowerCamelCase )
lowerCAmelCase_ : List[Any] = item
lowerCAmelCase_ : Dict = 0
return item
def __lowercase ( self : Optional[Any] , lowerCamelCase : int ) -> Optional[Any]:
if item not in self.parent:
return self.make_set(lowerCamelCase )
if item != self.parent[item]:
lowerCAmelCase_ : List[str] = self.find(self.parent[item] )
return self.parent[item]
def __lowercase ( self : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : Dict ) -> List[str]:
lowerCAmelCase_ : List[str] = self.find(lowerCamelCase )
lowerCAmelCase_ : List[str] = self.find(lowerCamelCase )
if roota == roota:
return roota
if self.rank[roota] > self.rank[roota]:
lowerCAmelCase_ : Optional[Any] = roota
return roota
if self.rank[roota] < self.rank[roota]:
lowerCAmelCase_ : int = roota
return roota
if self.rank[roota] == self.rank[roota]:
self.rank[roota] += 1
lowerCAmelCase_ : Any = roota
return roota
return None
@staticmethod
def __lowercase ( lowerCamelCase : int ) -> List[str]:
lowerCAmelCase_ : Optional[int] = graph.num_vertices
lowerCAmelCase_ : Tuple = Graph.UnionFind()
lowerCAmelCase_ : int = []
while num_components > 1:
lowerCAmelCase_ : str = {}
for vertex in graph.get_vertices():
lowerCAmelCase_ : int = -1
lowerCAmelCase_ : int = graph.get_edges()
for edge in edges:
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : List[Any] = edge
edges.remove((tail, head, weight) )
for edge in edges:
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : List[Any] = edge
lowerCAmelCase_ : List[str] = union_find.find(lowerCamelCase )
lowerCAmelCase_ : List[str] = union_find.find(lowerCamelCase )
if seta != seta:
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCAmelCase_ : List[Any] = [head, tail, weight]
if cheap_edge[seta] == -1 or cheap_edge[seta][2] > weight:
lowerCAmelCase_ : str = [head, tail, weight]
for vertex in cheap_edge:
if cheap_edge[vertex] != -1:
lowerCAmelCase_, lowerCAmelCase_, lowerCAmelCase_ : Dict = cheap_edge[vertex]
if union_find.find(lowerCamelCase ) != union_find.find(lowerCamelCase ):
union_find.union(lowerCamelCase , lowerCamelCase )
mst_edges.append(cheap_edge[vertex] )
lowerCAmelCase_ : Tuple = num_components - 1
lowerCAmelCase_ : Tuple = Graph.build(edges=lowerCamelCase )
return mst
| 398 | 0 |
def lowerCamelCase__ ( _lowercase = 10 , _lowercase = 22 ):
'''simple docstring'''
UpperCAmelCase_ : Tuple = range(1 , _lowercase )
UpperCAmelCase_ : Optional[int] = range(1 , _lowercase )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(F"""{solution(10, 22) = }""") | 30 |
from ... import PretrainedConfig
__A : int = {
'''sijunhe/nezha-cn-base''': '''https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json''',
}
class __A ( lowerCAmelCase ):
lowerCAmelCase_ : List[Any] = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
lowerCAmelCase_ : str = "nezha"
def __init__( self : Optional[int] , UpperCAmelCase_ : Optional[int]=21128 , UpperCAmelCase_ : Any=768 , UpperCAmelCase_ : Tuple=12 , UpperCAmelCase_ : Union[str, Any]=12 , UpperCAmelCase_ : int=3072 , UpperCAmelCase_ : Optional[int]="gelu" , UpperCAmelCase_ : int=0.1 , UpperCAmelCase_ : List[Any]=0.1 , UpperCAmelCase_ : Optional[int]=512 , UpperCAmelCase_ : Tuple=64 , UpperCAmelCase_ : int=2 , UpperCAmelCase_ : Union[str, Any]=0.02 , UpperCAmelCase_ : Dict=1E-12 , UpperCAmelCase_ : Dict=0.1 , UpperCAmelCase_ : List[Any]=0 , UpperCAmelCase_ : str=2 , UpperCAmelCase_ : Optional[int]=3 , UpperCAmelCase_ : Optional[Any]=True , **UpperCAmelCase_ : Any , ):
super().__init__(pad_token_id=UpperCAmelCase_ , bos_token_id=UpperCAmelCase_ , eos_token_id=UpperCAmelCase_ , **UpperCAmelCase_ )
lowerCAmelCase : Dict = vocab_size
lowerCAmelCase : Union[str, Any] = hidden_size
lowerCAmelCase : Tuple = num_hidden_layers
lowerCAmelCase : Tuple = num_attention_heads
lowerCAmelCase : List[str] = hidden_act
lowerCAmelCase : Tuple = intermediate_size
lowerCAmelCase : Any = hidden_dropout_prob
lowerCAmelCase : Any = attention_probs_dropout_prob
lowerCAmelCase : List[Any] = max_position_embeddings
lowerCAmelCase : Tuple = max_relative_position
lowerCAmelCase : Tuple = type_vocab_size
lowerCAmelCase : int = initializer_range
lowerCAmelCase : int = layer_norm_eps
lowerCAmelCase : List[str] = classifier_dropout
lowerCAmelCase : Optional[Any] = use_cache
| 343 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
__SCREAMING_SNAKE_CASE = '▁'
__SCREAMING_SNAKE_CASE = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__SCREAMING_SNAKE_CASE = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__SCREAMING_SNAKE_CASE = {'vinai/bartpho-syllable': 1_024}
class a__ ( A__ ):
UpperCAmelCase__ = VOCAB_FILES_NAMES
UpperCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase__ = ['''input_ids''', '''attention_mask''']
def __init__( self :Optional[Any] , _lowerCamelCase :Optional[Any] , _lowerCamelCase :Any , _lowerCamelCase :Dict="<s>" , _lowerCamelCase :List[Any]="</s>" , _lowerCamelCase :int="</s>" , _lowerCamelCase :Optional[Any]="<s>" , _lowerCamelCase :Tuple="<unk>" , _lowerCamelCase :Dict="<pad>" , _lowerCamelCase :str="<mask>" , _lowerCamelCase :Optional[Dict[str, Any]] = None , **_lowerCamelCase :Tuple , ):
'''simple docstring'''
UpperCamelCase_ : int =AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase , rstrip=_lowerCamelCase ) if isinstance(_lowerCamelCase , _lowerCamelCase ) else mask_token
UpperCamelCase_ : Tuple ={} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_lowerCamelCase , eos_token=_lowerCamelCase , unk_token=_lowerCamelCase , sep_token=_lowerCamelCase , cls_token=_lowerCamelCase , pad_token=_lowerCamelCase , mask_token=_lowerCamelCase , sp_model_kwargs=self.sp_model_kwargs , **_lowerCamelCase , )
UpperCamelCase_ : int =vocab_file
UpperCamelCase_ : Tuple =monolingual_vocab_file
UpperCamelCase_ : int =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_lowerCamelCase ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
UpperCamelCase_ : Dict ={}
UpperCamelCase_ : List[Any] =0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(_lowerCamelCase ) not in self.fairseq_tokens_to_ids:
UpperCamelCase_ : List[Any] =cnt
cnt += 1
with open(_lowerCamelCase , 'r' , encoding='utf-8' ) as f:
for line in f.readlines():
UpperCamelCase_ : str =line.strip().split()[0]
UpperCamelCase_ : Optional[int] =len(self.fairseq_tokens_to_ids )
if str(_lowerCamelCase ) not in self.fairseq_tokens_to_ids:
UpperCamelCase_ : Tuple =len(self.fairseq_tokens_to_ids )
UpperCamelCase_ : Optional[int] ={v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self :Optional[int] ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =self.__dict__.copy()
UpperCamelCase_ : List[str] =None
UpperCamelCase_ : Dict =self.sp_model.serialized_model_proto()
return state
def __setstate__( self :List[str] , _lowerCamelCase :List[Any] ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
UpperCamelCase_ : List[Any] ={}
UpperCamelCase_ : str =spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def lowerCamelCase_ ( self :List[Any] , _lowerCamelCase :List[int] , _lowerCamelCase :Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCamelCase_ : Optional[Any] =[self.cls_token_id]
UpperCamelCase_ : Optional[int] =[self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self :Optional[int] , _lowerCamelCase :List[int] , _lowerCamelCase :Optional[List[int]] = None , _lowerCamelCase :bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_lowerCamelCase , token_ids_a=_lowerCamelCase , already_has_special_tokens=_lowerCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(_lowerCamelCase )) + [1]
return [1] + ([0] * len(_lowerCamelCase )) + [1, 1] + ([0] * len(_lowerCamelCase )) + [1]
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :List[int] , _lowerCamelCase :Optional[List[int]] = None ):
'''simple docstring'''
UpperCamelCase_ : Optional[Any] =[self.sep_token_id]
UpperCamelCase_ : Any =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def lowerCamelCase_ ( self :int ):
'''simple docstring'''
UpperCamelCase_ : Tuple ={self.convert_ids_to_tokens(_lowerCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def lowerCamelCase_ ( self :Optional[Any] , _lowerCamelCase :str ):
'''simple docstring'''
return self.sp_model.encode(_lowerCamelCase , out_type=_lowerCamelCase )
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :int ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def lowerCamelCase_ ( self :List[Any] , _lowerCamelCase :Union[str, Any] ):
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def lowerCamelCase_ ( self :int , _lowerCamelCase :Tuple ):
'''simple docstring'''
UpperCamelCase_ : List[Any] =''.join(_lowerCamelCase ).replace(_lowerCamelCase , ' ' ).strip()
return out_string
def lowerCamelCase_ ( self :Tuple , _lowerCamelCase :str , _lowerCamelCase :Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_lowerCamelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
UpperCamelCase_ : List[str] =os.path.join(
_lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
UpperCamelCase_ : Optional[Any] =os.path.join(
_lowerCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['monolingual_vocab_file'] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_lowerCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_lowerCamelCase , 'wb' ) as fi:
UpperCamelCase_ : Optional[Any] =self.sp_model.serialized_model_proto()
fi.write(_lowerCamelCase )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
_lowerCamelCase ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , _lowerCamelCase )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(_lowerCamelCase , 'w' , encoding='utf-8' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f'''{str(_lowerCamelCase )} \n''' )
return out_vocab_file, out_monolingual_vocab_file
| 395 |
"""simple docstring"""
import requests
from bsa import BeautifulSoup
def A_ ( __lowercase = "https://www.worldometers.info/coronavirus" ):
UpperCamelCase_ : Dict =BeautifulSoup(requests.get(__lowercase ).text , 'html.parser' )
UpperCamelCase_ : List[Any] =soup.findAll('h1' )
UpperCamelCase_ : List[str] =soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(__lowercase , __lowercase )}
if __name__ == "__main__":
print('\033[1m' + 'COVID-19 Status of the World' + '\033[0m\n')
for key, value in world_covidaa_stats().items():
print(F"""{key}\n{value}\n""")
| 395 | 1 |
'''simple docstring'''
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : float = 0.0
lowerCamelCase : int = 1
lowerCamelCase : int = 1
lowerCamelCase : bool = True
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> Any:
__lowerCamelCase : Any = []
__lowerCamelCase : List[str] = []
for i in range(self.num_layers ):
__lowerCamelCase : Optional[Any] = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase : Optional[int] = FlaxResnetBlockaD(
in_channels=SCREAMING_SNAKE_CASE_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = resnets
__lowerCamelCase : str = attentions
if self.add_downsample:
__lowerCamelCase : Any = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Dict:
__lowerCamelCase : Dict = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__lowerCamelCase : List[Any] = resnet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[str] = attn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=SCREAMING_SNAKE_CASE_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase : Optional[Any] = self.downsamplers_a(SCREAMING_SNAKE_CASE_ )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : float = 0.0
lowerCamelCase : int = 1
lowerCamelCase : bool = True
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> Union[str, Any]:
__lowerCamelCase : Tuple = []
for i in range(self.num_layers ):
__lowerCamelCase : Optional[int] = self.in_channels if i == 0 else self.out_channels
__lowerCamelCase : Union[str, Any] = FlaxResnetBlockaD(
in_channels=SCREAMING_SNAKE_CASE_ , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Union[str, Any] = resnets
if self.add_downsample:
__lowerCamelCase : Any = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Union[str, Any]:
__lowerCamelCase : int = ()
for resnet in self.resnets:
__lowerCamelCase : Dict = resnet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=SCREAMING_SNAKE_CASE_ )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCamelCase : int = self.downsamplers_a(SCREAMING_SNAKE_CASE_ )
output_states += (hidden_states,)
return hidden_states, output_states
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : float = 0.0
lowerCamelCase : int = 1
lowerCamelCase : int = 1
lowerCamelCase : bool = True
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> Dict:
__lowerCamelCase : Optional[Any] = []
__lowerCamelCase : Dict = []
for i in range(self.num_layers ):
__lowerCamelCase : Union[str, Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase : Optional[Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Optional[int] = resnets
__lowerCamelCase : Optional[Any] = attentions
if self.add_upsample:
__lowerCamelCase : Union[str, Any] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Tuple:
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__lowerCamelCase : int = res_hidden_states_tuple[-1]
__lowerCamelCase : str = res_hidden_states_tuple[:-1]
__lowerCamelCase : Optional[int] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase : List[str] = resnet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Dict = attn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=SCREAMING_SNAKE_CASE_ )
if self.add_upsample:
__lowerCamelCase : Tuple = self.upsamplers_a(SCREAMING_SNAKE_CASE_ )
return hidden_states
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : int
lowerCamelCase : float = 0.0
lowerCamelCase : int = 1
lowerCamelCase : bool = True
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> str:
__lowerCamelCase : Tuple = []
for i in range(self.num_layers ):
__lowerCamelCase : Dict = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCamelCase : str = self.prev_output_channel if i == 0 else self.out_channels
__lowerCamelCase : Optional[int] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[str] = resnets
if self.add_upsample:
__lowerCamelCase : Tuple = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> List[Any]:
for resnet in self.resnets:
# pop res hidden states
__lowerCamelCase : List[Any] = res_hidden_states_tuple[-1]
__lowerCamelCase : Any = res_hidden_states_tuple[:-1]
__lowerCamelCase : Any = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCamelCase : Optional[int] = resnet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=SCREAMING_SNAKE_CASE_ )
if self.add_upsample:
__lowerCamelCase : int = self.upsamplers_a(SCREAMING_SNAKE_CASE_ )
return hidden_states
class UpperCAmelCase_ (nn.Module ):
"""simple docstring"""
lowerCamelCase : int
lowerCamelCase : float = 0.0
lowerCamelCase : int = 1
lowerCamelCase : int = 1
lowerCamelCase : bool = False
lowerCamelCase : bool = False
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase_ ( self ) -> Optional[Any]:
# there is always at least one resnet
__lowerCamelCase : Dict = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__lowerCamelCase : List[str] = []
for _ in range(self.num_layers ):
__lowerCamelCase : List[str] = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : List[Any] = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = resnets
__lowerCamelCase : Optional[int] = attentions
def __call__( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=True ) -> Dict:
__lowerCamelCase : List[str] = self.resnets[0](SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__lowerCamelCase : Optional[int] = attn(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=SCREAMING_SNAKE_CASE_ )
__lowerCamelCase : Tuple = resnet(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , deterministic=SCREAMING_SNAKE_CASE_ )
return hidden_states
| 13 |
from __future__ import annotations
__UpperCAmelCase : Any = list[list[int]]
# assigning initial values to the grid
__UpperCAmelCase : Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__UpperCAmelCase : Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowerCamelCase_ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def lowerCamelCase_ ( UpperCamelCase_ ):
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def lowerCamelCase_ ( UpperCamelCase_ ):
if location := find_empty_location(UpperCamelCase_ ):
_a , _a : str = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
_a : List[str] = digit
if sudoku(UpperCamelCase_ ) is not None:
return grid
_a : Optional[int] = 0
return None
def lowerCamelCase_ ( UpperCamelCase_ ):
for row in grid:
for cell in row:
print(UpperCamelCase_ , end=''' ''' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
__UpperCAmelCase : List[Any] = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 471 | 0 |
"""simple docstring"""
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger('''transformers.models.speecht5''')
def __lowerCamelCase ( SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
hf_model.apply_weight_norm()
_UpperCAmelCase = checkpoint['input_conv.weight_g']
_UpperCAmelCase = checkpoint['input_conv.weight_v']
_UpperCAmelCase = checkpoint['input_conv.bias']
for i in range(len(config.upsample_rates ) ):
_UpperCAmelCase = checkpoint[F"""upsamples.{i}.1.weight_g"""]
_UpperCAmelCase = checkpoint[F"""upsamples.{i}.1.weight_v"""]
_UpperCAmelCase = checkpoint[F"""upsamples.{i}.1.bias"""]
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
_UpperCAmelCase = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_g"""]
_UpperCAmelCase = checkpoint[F"""blocks.{i}.convs1.{j}.1.weight_v"""]
_UpperCAmelCase = checkpoint[F"""blocks.{i}.convs1.{j}.1.bias"""]
_UpperCAmelCase = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_g"""]
_UpperCAmelCase = checkpoint[F"""blocks.{i}.convs2.{j}.1.weight_v"""]
_UpperCAmelCase = checkpoint[F"""blocks.{i}.convs2.{j}.1.bias"""]
_UpperCAmelCase = checkpoint['output_conv.1.weight_g']
_UpperCAmelCase = checkpoint['output_conv.1.weight_v']
_UpperCAmelCase = checkpoint['output_conv.1.bias']
hf_model.remove_weight_norm()
@torch.no_grad()
def __lowerCamelCase ( SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE=None,SCREAMING_SNAKE_CASE=None,) -> Any:
"""simple docstring"""
if config_path is not None:
_UpperCAmelCase = SpeechTaHifiGanConfig.from_pretrained(SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = SpeechTaHifiGanConfig()
_UpperCAmelCase = SpeechTaHifiGan(SCREAMING_SNAKE_CASE )
_UpperCAmelCase = torch.load(SCREAMING_SNAKE_CASE )
load_weights(orig_checkpoint['model']['generator'],SCREAMING_SNAKE_CASE,SCREAMING_SNAKE_CASE )
_UpperCAmelCase = np.load(SCREAMING_SNAKE_CASE )
_UpperCAmelCase = stats[0].reshape(-1 )
_UpperCAmelCase = stats[1].reshape(-1 )
_UpperCAmelCase = torch.from_numpy(SCREAMING_SNAKE_CASE ).float()
_UpperCAmelCase = torch.from_numpy(SCREAMING_SNAKE_CASE ).float()
model.save_pretrained(SCREAMING_SNAKE_CASE )
if repo_id:
print('Pushing to the hub...' )
model.push_to_hub(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
parser.add_argument('''--checkpoint_path''', required=True, default=None, type=str, help='''Path to original checkpoint''')
parser.add_argument('''--stats_path''', required=True, default=None, type=str, help='''Path to stats.npy file''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--pytorch_dump_folder_path''', required=True, default=None, type=str, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--push_to_hub''', default=None, type=str, help='''Where to upload the converted model on the 🤗 hub.'''
)
lowerCAmelCase_ = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 494 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class lowerCAmelCase ( unittest.TestCase ):
def __init__( self , a__ , a__=7 , a__=3 , a__=18 , a__=30 , a__=4_00 , a__=True , a__=None , a__=True , a__=None , a__=True , a__=[0.48_145_466, 0.4_578_275, 0.40_821_073] , a__=[0.26_862_954, 0.26_130_258, 0.27_577_711] , a__=True , ):
_UpperCAmelCase = size if size is not None else {'height': 2_24, 'width': 2_24}
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
_UpperCAmelCase = do_convert_rgb
def __A ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def __A ( self , a__=False , a__=False , a__=False ):
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
_UpperCAmelCase = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
2_55 , size=(self.num_channels, self.max_resolution, self.max_resolution) , dtype=np.uinta ) )
else:
_UpperCAmelCase = []
for i in range(self.batch_size ):
_UpperCAmelCase , _UpperCAmelCase = np.random.choice(np.arange(self.min_resolution , self.max_resolution ) , 2 )
image_inputs.append(np.random.randint(2_55 , size=(self.num_channels, width, height) , dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
_UpperCAmelCase = [Image.fromarray(np.moveaxis(a__ , 0 , -1 ) ) for x in image_inputs]
if torchify:
_UpperCAmelCase = [torch.from_numpy(a__ ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class lowerCAmelCase ( snake_case , unittest.TestCase ):
lowerCAmelCase__ = ChineseCLIPImageProcessor if is_vision_available() else None
def __A ( self ):
_UpperCAmelCase = ChineseCLIPImageProcessingTester(self , do_center_crop=a__ )
@property
def __A ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ):
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , 'do_resize' ) )
self.assertTrue(hasattr(a__ , 'size' ) )
self.assertTrue(hasattr(a__ , 'do_center_crop' ) )
self.assertTrue(hasattr(a__ , 'center_crop' ) )
self.assertTrue(hasattr(a__ , 'do_normalize' ) )
self.assertTrue(hasattr(a__ , 'image_mean' ) )
self.assertTrue(hasattr(a__ , 'image_std' ) )
self.assertTrue(hasattr(a__ , 'do_convert_rgb' ) )
def __A ( self ):
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'height': 2_24, 'width': 2_24} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def __A ( self ):
pass
def __A ( self ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(a__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __A ( self ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(a__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def __A ( self ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(a__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
@require_torch
@require_vision
class lowerCAmelCase ( snake_case , unittest.TestCase ):
lowerCAmelCase__ = ChineseCLIPImageProcessor if is_vision_available() else None
def __A ( self ):
_UpperCAmelCase = ChineseCLIPImageProcessingTester(self , num_channels=4 , do_center_crop=a__ )
_UpperCAmelCase = 3
@property
def __A ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ):
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , 'do_resize' ) )
self.assertTrue(hasattr(a__ , 'size' ) )
self.assertTrue(hasattr(a__ , 'do_center_crop' ) )
self.assertTrue(hasattr(a__ , 'center_crop' ) )
self.assertTrue(hasattr(a__ , 'do_normalize' ) )
self.assertTrue(hasattr(a__ , 'image_mean' ) )
self.assertTrue(hasattr(a__ , 'image_std' ) )
self.assertTrue(hasattr(a__ , 'do_convert_rgb' ) )
def __A ( self ):
pass
def __A ( self ):
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = self.image_processor_tester.prepare_inputs(equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(a__ , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 494 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a_ = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 177 |
def _lowerCAmelCase ( A__: str , A__: Tuple ):
'''simple docstring'''
print('''\nThe shortest path matrix using Floyd Warshall algorithm\n''' )
for i in range(A__ ):
for j in range(A__ ):
if dist[i][j] != float('''inf''' ):
print(int(dist[i][j] ) , end='''\t''' )
else:
print('''INF''' , end='''\t''' )
print()
def _lowerCAmelCase ( A__: List[Any] , A__: Any ):
'''simple docstring'''
UpperCAmelCase = [[float('''inf''' ) for _ in range(A__ )] for _ in range(A__ )]
for i in range(A__ ):
for j in range(A__ ):
UpperCAmelCase = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(A__ ):
# looping through rows of graph array
for i in range(A__ ):
# looping through columns of graph array
for j in range(A__ ):
if (
dist[i][k] != float('''inf''' )
and dist[k][j] != float('''inf''' )
and dist[i][k] + dist[k][j] < dist[i][j]
):
UpperCAmelCase = dist[i][k] + dist[k][j]
_print_dist(A__ , A__ )
return dist, v
if __name__ == "__main__":
__magic_name__ = int(input("Enter number of vertices: "))
__magic_name__ = int(input("Enter number of edges: "))
__magic_name__ = [[float("inf") for i in range(v)] for j in range(v)]
for i in range(v):
__magic_name__ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print("\nEdge ", i + 1)
__magic_name__ = int(input("Enter source:"))
__magic_name__ = int(input("Enter destination:"))
__magic_name__ = float(input("Enter weight:"))
__magic_name__ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 254 | 0 |
'''simple docstring'''
from __future__ import annotations
def snake_case_ (_a : Any , _a : str , _a : Tuple , _a : int ): # noqa: E741
while r - l > 1:
UpperCAmelCase = (l + r) // 2
if v[m] >= key:
UpperCAmelCase = m
else:
UpperCAmelCase = m # noqa: E741
return r
def snake_case_ (_a : list[int] ):
if len(_a ) == 0:
return 0
UpperCAmelCase = [0] * len(_a )
UpperCAmelCase = 1
UpperCAmelCase = v[0]
for i in range(1 , len(_a ) ):
if v[i] < tail[0]:
UpperCAmelCase = v[i]
elif v[i] > tail[length - 1]:
UpperCAmelCase = v[i]
length += 1
else:
UpperCAmelCase = v[i]
return length
if __name__ == "__main__":
import doctest
doctest.testmod()
| 719 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def snake_case_ (_a : List[str] ):
for param in module.parameters():
UpperCAmelCase = False
def snake_case_ ():
UpperCAmelCase = '''cuda''' if torch.cuda.is_available() else '''cpu'''
if torch.backends.mps.is_available() and torch.backends.mps.is_built():
UpperCAmelCase = '''mps'''
if device == "mps":
print(
'''WARNING: MPS currently doesn\'t seem to work, and messes up backpropagation without any visible torch'''
''' errors. I recommend using CUDA on a colab notebook or CPU instead if you\'re facing inexplicable issues'''
''' with generations.''' )
return device
def snake_case_ (_a : Optional[Any] ):
UpperCAmelCase = plt.imshow(_a )
fig.axes.get_xaxis().set_visible(_a )
fig.axes.get_yaxis().set_visible(_a )
plt.show()
def snake_case_ ():
UpperCAmelCase = datetime.now()
UpperCAmelCase = current_time.strftime('''%H:%M:%S''' )
return timestamp
| 358 | 0 |
"""simple docstring"""
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
'''The `inpainting.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionInpaintPipeline` instead.'''
)
| 480 |
"""simple docstring"""
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __lowercase ( _UpperCAmelCase):
"""simple docstring"""
@require_torch
def __UpperCamelCase (self ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
snake_case_ : int = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
snake_case_ : Any = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
snake_case_ : Dict = """
import socket
def offline_socket(*args, **kwargs): raise RuntimeError(\"Offline mode is enabled, we shouldn't access internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
snake_case_ : Tuple = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(lowercase__ )
BertModel.from_pretrained(lowercase__ )
BertTokenizer.from_pretrained(lowercase__ )
pipeline(task="""fill-mask""" , model=lowercase__ )
# baseline - just load from_pretrained with normal network
snake_case_ : Optional[Any] = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
snake_case_ : Tuple = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
snake_case_ : str = """1"""
snake_case_ : List[Any] = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def __UpperCamelCase (self ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
snake_case_ : List[str] = """
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
"""
snake_case_ : int = """
mname = \"hf-internal-testing/tiny-random-bert\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
BertTokenizer.from_pretrained(mname)
pipe = pipeline(task=\"fill-mask\", model=mname)
print(\"success\")
"""
snake_case_ : Optional[int] = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Faking flaky internet\")
socket.socket = offline_socket
"""
# Force fetching the files so that we can use the cache
snake_case_ : Optional[int] = """hf-internal-testing/tiny-random-bert"""
BertConfig.from_pretrained(lowercase__ )
BertModel.from_pretrained(lowercase__ )
BertTokenizer.from_pretrained(lowercase__ )
pipeline(task="""fill-mask""" , model=lowercase__ )
# baseline - just load from_pretrained with normal network
snake_case_ : int = [sys.executable, """-c""", """\n""".join([load, run, mock] )]
# should succeed
snake_case_ : List[Any] = self.get_env()
snake_case_ : Dict = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def __UpperCamelCase (self ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
snake_case_ : Optional[int] = """
from transformers import BertConfig, BertModel, BertTokenizer
"""
snake_case_ : Dict = """
mname = \"hf-internal-testing/tiny-random-bert-sharded\"
BertConfig.from_pretrained(mname)
BertModel.from_pretrained(mname)
print(\"success\")
"""
snake_case_ : int = """
import socket
def offline_socket(*args, **kwargs): raise ValueError(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
# baseline - just load from_pretrained with normal network
snake_case_ : List[Any] = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
snake_case_ : List[str] = self.get_env()
snake_case_ : str = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# next emulate no network
snake_case_ : Any = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
snake_case_ : Optional[Any] = """1"""
snake_case_ : int = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
@require_torch
def __UpperCamelCase (self ):
snake_case_ : str = """
from transformers import pipeline
"""
snake_case_ : Dict = """
mname = \"hf-internal-testing/tiny-random-bert\"
pipe = pipeline(model=mname)
"""
snake_case_ : Dict = """
import socket
def offline_socket(*args, **kwargs): raise socket.error(\"Offline mode is enabled\")
socket.socket = offline_socket
"""
snake_case_ : List[str] = self.get_env()
snake_case_ : Dict = """1"""
snake_case_ : int = [sys.executable, """-c""", """\n""".join([load, mock, run] )]
snake_case_ : Optional[int] = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
"""You cannot infer task automatically within `pipeline` when using offline mode""" , result.stderr.decode().replace("""\n""" , """""" ) , )
@require_torch
def __UpperCamelCase (self ):
snake_case_ : int = """
from transformers import AutoModel
"""
snake_case_ : Optional[int] = """
mname = \"hf-internal-testing/test_dynamic_model\"
AutoModel.from_pretrained(mname, trust_remote_code=True)
print(\"success\")
"""
# baseline - just load from_pretrained with normal network
snake_case_ : Dict = [sys.executable, """-c""", """\n""".join([load, run] )]
# should succeed
snake_case_ : Optional[Any] = self.get_env()
snake_case_ : List[str] = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
snake_case_ : Any = """1"""
snake_case_ : Dict = subprocess.run(lowercase__ , env=lowercase__ , check=lowercase__ , capture_output=lowercase__ )
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn("""success""" , result.stdout.decode() )
| 480 | 1 |
'''simple docstring'''
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
_A = 0B1_0_1_1_0_0_1_1_1_1_1_0_1_1_0_0_1_0_0_1_0_0_0_0_0_1_1_1_1_0_1_1_1_0_1_1_0_0_0_1_1_0_0_1_1_1_1_0
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
_A = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class UpperCAmelCase__ :
"""simple docstring"""
def __init__(self ) -> Union[str, Any]:
lowercase_ : Any = WATERMARK_BITS
lowercase_ : List[str] = WatermarkEncoder()
self.encoder.set_watermark('bits' , self.watermark )
def _lowerCamelCase (self , _a ) -> Dict:
# can't encode images that are smaller than 256
if images.shape[-1] < 256:
return images
lowercase_ : Union[str, Any] = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
lowercase_ : Optional[Any] = [self.encoder.encode(_a , 'dwtDct' ) for image in images]
lowercase_ : List[Any] = torch.from_numpy(np.array(_a ) ).permute(0 , 3 , 1 , 2 )
lowercase_ : Optional[int] = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
| 438 | '''simple docstring'''
from decimal import Decimal, getcontext
from math import ceil, factorial
def _UpperCamelCase ( SCREAMING_SNAKE_CASE_ ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError('Undefined for non-integers' )
elif precision < 1:
raise ValueError('Undefined for non-natural numbers' )
lowercase_ : str = precision
lowercase_ : List[str] = ceil(precision / 14 )
lowercase_ : Union[str, Any] = 426_880 * Decimal(10_005 ).sqrt()
lowercase_ : List[Any] = 1
lowercase_ : Optional[int] = 13_591_409
lowercase_ : Dict = Decimal(SCREAMING_SNAKE_CASE_ )
for k in range(1 , SCREAMING_SNAKE_CASE_ ):
lowercase_ : List[str] = factorial(6 * k ) // (factorial(3 * k ) * factorial(SCREAMING_SNAKE_CASE_ ) ** 3)
linear_term += 545_140_134
exponential_term *= -262_537_412_640_768_000
partial_sum += Decimal(multinomial_term * linear_term ) / exponential_term
return str(constant_term / partial_sum )[:-1]
if __name__ == "__main__":
_A = 5_0
print(F"""The first {n} digits of pi is: {pi(n)}""")
| 438 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.