code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_glpn import GLPNImageProcessor
lowerCamelCase_ = logging.get_logger(__name__)
class _SCREAMING_SNAKE_CASE( a_ ):
def __init__( self ,*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ) -> None:
"""simple docstring"""
warnings.warn(
'''The class GLPNFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'''
''' use GLPNImageProcessor instead.''' ,SCREAMING_SNAKE_CASE__ ,)
super().__init__(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ) | 191 |
def a_ ( _A , _A ) -> int:
"""simple docstring"""
return 1 if input_a == input_a else 0
def a_ ( ) -> None:
"""simple docstring"""
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
| 307 | 0 |
from __future__ import annotations
from random import choice
def lowerCamelCase_ ( _lowerCamelCase ):
return choice(_a )
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
lowerCamelCase__ : Union[str, Any] = random_pivot(_a )
# partition based on pivot
# linear time
lowerCamelCase__ : Union[str, Any] = [e for e in lst if e < pivot]
lowerCamelCase__ : Optional[Any] = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(_a ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(_a ) < k - 1:
return kth_number(_a , k - len(_a ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(_a , _a )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 361 |
"""simple docstring"""
import json
import os
import shutil
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoConfig, BertConfig, GPTaConfig
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import TOKEN, USER, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_configuration import CustomConfig # noqa E402
A_ : Any = {
"return_dict": False,
"output_hidden_states": True,
"output_attentions": True,
"torchscript": True,
"torch_dtype": "float16",
"use_bfloat16": True,
"tf_legacy_loss": True,
"pruned_heads": {"a": 1},
"tie_word_embeddings": False,
"is_decoder": True,
"cross_attention_hidden_size": 1_28,
"add_cross_attention": True,
"tie_encoder_decoder": True,
"max_length": 50,
"min_length": 3,
"do_sample": True,
"early_stopping": True,
"num_beams": 3,
"num_beam_groups": 3,
"diversity_penalty": 0.5,
"temperature": 2.0,
"top_k": 10,
"top_p": 0.7,
"typical_p": 0.2,
"repetition_penalty": 0.8,
"length_penalty": 0.8,
"no_repeat_ngram_size": 5,
"encoder_no_repeat_ngram_size": 5,
"bad_words_ids": [1, 2, 3],
"num_return_sequences": 3,
"chunk_size_feed_forward": 5,
"output_scores": True,
"return_dict_in_generate": True,
"forced_bos_token_id": 2,
"forced_eos_token_id": 3,
"remove_invalid_values": True,
"architectures": ["BertModel"],
"finetuning_task": "translation",
"id2label": {0: "label"},
"label2id": {"label": "0"},
"tokenizer_class": "BertTokenizerFast",
"prefix": "prefix",
"bos_token_id": 6,
"pad_token_id": 7,
"eos_token_id": 8,
"sep_token_id": 9,
"decoder_start_token_id": 10,
"exponential_decay_length_penalty": (5, 1.01),
"suppress_tokens": [0, 1],
"begin_suppress_tokens": 2,
"task_specific_params": {"translation": "some_params"},
"problem_type": "regression",
}
@is_staging_test
class a_ ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def a__ (cls ):
'''simple docstring'''
lowerCamelCase__ : Tuple = TOKEN
HfFolder.save_token(lowerCamelCase_ )
@classmethod
def a__ (cls ):
'''simple docstring'''
try:
delete_repo(token=cls._token, repo_id='test-config' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='valid_org/test-config-org' )
except HTTPError:
pass
try:
delete_repo(token=cls._token, repo_id='test-dynamic-config' )
except HTTPError:
pass
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Optional[int] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('test-config', use_auth_token=self._token )
lowerCamelCase__ : List[str] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='test-config' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(lowerCamelCase_, repo_id='test-config', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : List[Any] = BertConfig.from_pretrained(f'''{USER}/test-config''' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = BertConfig(
vocab_size=9_9, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, intermediate_size=3_7 )
config.push_to_hub('valid_org/test-config-org', use_auth_token=self._token )
lowerCamelCase__ : int = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
# Reset repo
delete_repo(token=self._token, repo_id='valid_org/test-config-org' )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(
lowerCamelCase_, repo_id='valid_org/test-config-org', push_to_hub=lowerCamelCase_, use_auth_token=self._token )
lowerCamelCase__ : Tuple = BertConfig.from_pretrained('valid_org/test-config-org' )
for k, v in config.to_dict().items():
if k != "transformers_version":
self.assertEqual(lowerCamelCase_, getattr(lowerCamelCase_, lowerCamelCase_ ) )
def a__ (self ):
'''simple docstring'''
CustomConfig.register_for_auto_class()
lowerCamelCase__ : str = CustomConfig(attribute=4_2 )
config.push_to_hub('test-dynamic-config', use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(config.auto_map, {'AutoConfig': 'custom_configuration.CustomConfig'} )
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(f'''{USER}/test-dynamic-config''', trust_remote_code=lowerCamelCase_ )
# Can't make an isinstance check because the new_config is from the FakeConfig class of a dynamic module
self.assertEqual(new_config.__class__.__name__, 'CustomConfig' )
self.assertEqual(new_config.attribute, 4_2 )
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Tuple = GPTaConfig()
# attempt to modify each of int/float/bool/str config records and verify they were updated
lowerCamelCase__ : Union[str, Any] = c.n_embd + 1 # int
lowerCamelCase__ : Optional[Any] = c.resid_pdrop + 1.0 # float
lowerCamelCase__ : str = not c.scale_attn_weights # bool
lowerCamelCase__ : Any = c.summary_type + 'foo' # str
c.update_from_string(
f'''n_embd={n_embd},resid_pdrop={resid_pdrop},scale_attn_weights={scale_attn_weights},summary_type={summary_type}''' )
self.assertEqual(lowerCamelCase_, c.n_embd, 'mismatch for key: n_embd' )
self.assertEqual(lowerCamelCase_, c.resid_pdrop, 'mismatch for key: resid_pdrop' )
self.assertEqual(lowerCamelCase_, c.scale_attn_weights, 'mismatch for key: scale_attn_weights' )
self.assertEqual(lowerCamelCase_, c.summary_type, 'mismatch for key: summary_type' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Any = PretrainedConfig()
lowerCamelCase__ : Union[str, Any] = [key for key in base_config.__dict__ if key not in config_common_kwargs]
# If this part of the test fails, you have arguments to addin config_common_kwargs above.
self.assertListEqual(
lowerCamelCase_, ['is_encoder_decoder', '_name_or_path', '_commit_hash', 'transformers_version'] )
lowerCamelCase__ : str = [key for key, value in config_common_kwargs.items() if value == getattr(lowerCamelCase_, lowerCamelCase_ )]
if len(lowerCamelCase_ ) > 0:
raise ValueError(
'The following keys are set with the default values in'
' `test_configuration_common.config_common_kwargs` pick another value for them:'
f''' {', '.join(lowerCamelCase_ )}.''' )
def a__ (self ):
'''simple docstring'''
with self.assertRaises(lowerCamelCase_ ):
# config is in subfolder, the following should not work without specifying the subfolder
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder' )
lowerCamelCase__ : Any = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert-subfolder', subfolder='bert' )
self.assertIsNotNone(lowerCamelCase_ )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = mock.Mock()
lowerCamelCase__ : str = 5_0_0
lowerCamelCase__ : Union[str, Any] = {}
lowerCamelCase__ : Any = HTTPError
lowerCamelCase__ : str = {}
# Download this model to make sure it's in the cache.
lowerCamelCase__ : Dict = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch('requests.Session.request', return_value=lowerCamelCase_ ) as mock_head:
lowerCamelCase__ : Union[str, Any] = BertConfig.from_pretrained('hf-internal-testing/tiny-random-bert' )
# This check we did call the fake head request
mock_head.assert_called()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = BertConfig.from_pretrained(
'https://huggingface.co/hf-internal-testing/tiny-random-bert/resolve/main/config.json' )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : str = AutoConfig.from_pretrained('bert-base-cased' )
lowerCamelCase__ : Optional[Any] = ['config.4.0.0.json']
with tempfile.TemporaryDirectory() as tmp_dir:
configuration.save_pretrained(lowerCamelCase_ )
lowerCamelCase__ : Tuple = 2
json.dump(configuration.to_dict(), open(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), 'w' ) )
# This should pick the new configuration file as the version of Transformers is > 4.0.0
lowerCamelCase__ : List[str] = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# Will need to be adjusted if we reach v42 and this test is still here.
# Should pick the old configuration file as the version of Transformers is < 4.42.0
lowerCamelCase__ : Optional[Any] = ['config.42.0.0.json']
lowerCamelCase__ : List[Any] = 7_6_8
configuration.save_pretrained(lowerCamelCase_ )
shutil.move(os.path.join(lowerCamelCase_, 'config.4.0.0.json' ), os.path.join(lowerCamelCase_, 'config.42.0.0.json' ) )
lowerCamelCase__ : str = AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 7_6_8 )
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : int = 'hf-internal-testing/test-two-configs'
import transformers as new_transformers
lowerCamelCase__ : Dict = 'v4.0.0'
lowerCamelCase__ , lowerCamelCase__ : str = new_transformers.models.auto.AutoConfig.from_pretrained(
lowerCamelCase_, return_unused_kwargs=lowerCamelCase_ )
self.assertEqual(new_configuration.hidden_size, 2 )
# This checks `_configuration_file` ia not kept in the kwargs by mistake.
self.assertDictEqual(lowerCamelCase_, {} )
# Testing an older version by monkey-patching the version in the module it's used.
import transformers as old_transformers
lowerCamelCase__ : Optional[Any] = 'v3.0.0'
lowerCamelCase__ : Optional[int] = old_transformers.models.auto.AutoConfig.from_pretrained(lowerCamelCase_ )
self.assertEqual(old_configuration.hidden_size, 7_6_8 )
| 316 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A__ = logging.get_logger(__name__)
A__ = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class __lowerCAmelCase ( lowerCamelCase__ ):
__lowerCamelCase = '''dpr'''
def __init__( self , _snake_case=30522 , _snake_case=768 , _snake_case=12 , _snake_case=12 , _snake_case=3072 , _snake_case="gelu" , _snake_case=0.1 , _snake_case=0.1 , _snake_case=512 , _snake_case=2 , _snake_case=0.02 , _snake_case=1e-12 , _snake_case=0 , _snake_case="absolute" , _snake_case = 0 , **_snake_case , ):
"""simple docstring"""
super().__init__(pad_token_id=_snake_case , **_snake_case )
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = hidden_act
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = type_vocab_size
_lowerCAmelCase = initializer_range
_lowerCAmelCase = layer_norm_eps
_lowerCAmelCase = projection_dim
_lowerCAmelCase = position_embedding_type
| 82 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
A__ = logging.get_logger(__name__)
def _UpperCAmelCase ( snake_case ):
"""simple docstring"""
_lowerCAmelCase = OrderedDict()
for key, value in state_dict.items():
if key.startswith("""module.encoder""" ):
_lowerCAmelCase = key.replace("""module.encoder""" , """glpn.encoder""" )
if key.startswith("""module.decoder""" ):
_lowerCAmelCase = key.replace("""module.decoder""" , """decoder.stages""" )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
_lowerCAmelCase = key[key.find("""patch_embed""" ) + len("""patch_embed""" )]
_lowerCAmelCase = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(snake_case )-1}' )
if "norm" in key:
_lowerCAmelCase = key.replace("""norm""" , """layer_norm""" )
if "glpn.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
_lowerCAmelCase = key[key.find("""glpn.encoder.layer_norm""" ) + len("""glpn.encoder.layer_norm""" )]
_lowerCAmelCase = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(snake_case )-1}' )
if "layer_norm1" in key:
_lowerCAmelCase = key.replace("""layer_norm1""" , """layer_norm_1""" )
if "layer_norm2" in key:
_lowerCAmelCase = key.replace("""layer_norm2""" , """layer_norm_2""" )
if "block" in key:
# replace for example block1 by block.0
_lowerCAmelCase = key[key.find("""block""" ) + len("""block""" )]
_lowerCAmelCase = key.replace(F'block{idx}' , F'block.{int(snake_case )-1}' )
if "attn.q" in key:
_lowerCAmelCase = key.replace("""attn.q""" , """attention.self.query""" )
if "attn.proj" in key:
_lowerCAmelCase = key.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in key:
_lowerCAmelCase = key.replace("""attn""" , """attention.self""" )
if "fc1" in key:
_lowerCAmelCase = key.replace("""fc1""" , """dense1""" )
if "fc2" in key:
_lowerCAmelCase = key.replace("""fc2""" , """dense2""" )
if "linear_pred" in key:
_lowerCAmelCase = key.replace("""linear_pred""" , """classifier""" )
if "linear_fuse" in key:
_lowerCAmelCase = key.replace("""linear_fuse.conv""" , """linear_fuse""" )
_lowerCAmelCase = key.replace("""linear_fuse.bn""" , """batch_norm""" )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
_lowerCAmelCase = key[key.find("""linear_c""" ) + len("""linear_c""" )]
_lowerCAmelCase = key.replace(F'linear_c{idx}' , F'linear_c.{int(snake_case )-1}' )
if "bot_conv" in key:
_lowerCAmelCase = key.replace("""bot_conv""" , """0.convolution""" )
if "skip_conv1" in key:
_lowerCAmelCase = key.replace("""skip_conv1""" , """1.convolution""" )
if "skip_conv2" in key:
_lowerCAmelCase = key.replace("""skip_conv2""" , """2.convolution""" )
if "fusion1" in key:
_lowerCAmelCase = key.replace("""fusion1""" , """1.fusion""" )
if "fusion2" in key:
_lowerCAmelCase = key.replace("""fusion2""" , """2.fusion""" )
if "fusion3" in key:
_lowerCAmelCase = key.replace("""fusion3""" , """3.fusion""" )
if "fusion" in key and "conv" in key:
_lowerCAmelCase = key.replace("""conv""" , """convolutional_layer""" )
if key.startswith("""module.last_layer_depth""" ):
_lowerCAmelCase = key.replace("""module.last_layer_depth""" , """head.head""" )
_lowerCAmelCase = value
return new_state_dict
def _UpperCAmelCase ( snake_case , snake_case ):
"""simple docstring"""
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
_lowerCAmelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.weight' )
_lowerCAmelCase = state_dict.pop(F'glpn.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
_lowerCAmelCase = kv_weight[
: config.hidden_sizes[i], :
]
_lowerCAmelCase = kv_bias[: config.hidden_sizes[i]]
_lowerCAmelCase = kv_weight[
config.hidden_sizes[i] :, :
]
_lowerCAmelCase = kv_bias[config.hidden_sizes[i] :]
def _UpperCAmelCase ( ):
"""simple docstring"""
_lowerCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_lowerCAmelCase = Image.open(requests.get(snake_case , stream=snake_case ).raw )
return image
@torch.no_grad()
def _UpperCAmelCase ( snake_case , snake_case , snake_case=False , snake_case=None ):
"""simple docstring"""
_lowerCAmelCase = GLPNConfig(hidden_sizes=[64, 1_28, 3_20, 5_12] , decoder_hidden_size=64 , depths=[3, 8, 27, 3] )
# load image processor (only resize + rescale)
_lowerCAmelCase = GLPNImageProcessor()
# prepare image
_lowerCAmelCase = prepare_img()
_lowerCAmelCase = image_processor(images=snake_case , return_tensors="""pt""" ).pixel_values
logger.info("""Converting model...""" )
# load original state dict
_lowerCAmelCase = torch.load(snake_case , map_location=torch.device("""cpu""" ) )
# rename keys
_lowerCAmelCase = rename_keys(snake_case )
# key and value matrices need special treatment
read_in_k_v(snake_case , snake_case )
# create HuggingFace model and load state dict
_lowerCAmelCase = GLPNForDepthEstimation(snake_case )
model.load_state_dict(snake_case )
model.eval()
# forward pass
_lowerCAmelCase = model(snake_case )
_lowerCAmelCase = outputs.predicted_depth
# verify output
if model_name is not None:
if "nyu" in model_name:
_lowerCAmelCase = torch.tensor(
[[4.4_147, 4.0_873, 4.0_673], [3.7_890, 3.2_881, 3.1_525], [3.7_674, 3.5_423, 3.4_913]] )
elif "kitti" in model_name:
_lowerCAmelCase = torch.tensor(
[[3.4_291, 2.7_865, 2.5_151], [3.2_841, 2.7_021, 2.3_502], [3.1_147, 2.4_625, 2.2_481]] )
else:
raise ValueError(F'Unknown model name: {model_name}' )
_lowerCAmelCase = torch.Size([1, 4_80, 6_40] )
assert predicted_depth.shape == expected_shape
assert torch.allclose(predicted_depth[0, :3, :3] , snake_case , atol=1E-4 )
print("""Looks ok!""" )
# finally, push to hub if required
if push_to_hub:
logger.info("""Pushing model and image processor to the hub...""" )
model.push_to_hub(
repo_path_or_name=Path(snake_case , snake_case ) , organization="""nielsr""" , commit_message="""Add model""" , use_temp_dir=snake_case , )
image_processor.push_to_hub(
repo_path_or_name=Path(snake_case , snake_case ) , organization="""nielsr""" , commit_message="""Add image processor""" , use_temp_dir=snake_case , )
if __name__ == "__main__":
A__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
A__ = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 82 | 1 |
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ ):
'''simple docstring'''
return " ".join(
"".join(word[::-1] ) if len(lowerCAmelCase_ ) > 4 else word for word in sentence.split() )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 369 |
"""simple docstring"""
def UpperCAmelCase__ (lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [0 for i in range(r + 1 )]
# nc0 = 1
__SCREAMING_SNAKE_CASE = 1
for i in range(1 , n + 1 ):
# to compute current row from previous row.
__SCREAMING_SNAKE_CASE = min(lowerCAmelCase_ , lowerCAmelCase_ )
while j > 0:
c[j] += c[j - 1]
j -= 1
return c[r]
print(binomial_coefficient(n=1_0, r=5))
| 195 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = ['''BeitFeatureExtractor''']
lowerCamelCase = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 131 |
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class _a ( unittest.TestCase):
def __init__( self : int , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str=13 , _SCREAMING_SNAKE_CASE : List[str]=7 , _SCREAMING_SNAKE_CASE : List[Any]=True , _SCREAMING_SNAKE_CASE : Dict=True , _SCREAMING_SNAKE_CASE : Optional[int]=True , _SCREAMING_SNAKE_CASE : Union[str, Any]=True , _SCREAMING_SNAKE_CASE : Optional[int]=99 , _SCREAMING_SNAKE_CASE : Optional[int]=32 , _SCREAMING_SNAKE_CASE : Union[str, Any]=5 , _SCREAMING_SNAKE_CASE : Optional[int]=4 , _SCREAMING_SNAKE_CASE : Optional[int]=37 , _SCREAMING_SNAKE_CASE : Any="gelu" , _SCREAMING_SNAKE_CASE : Tuple=0.1 , _SCREAMING_SNAKE_CASE : Any=0.1 , _SCREAMING_SNAKE_CASE : Tuple=512 , _SCREAMING_SNAKE_CASE : Optional[int]=16 , _SCREAMING_SNAKE_CASE : Optional[int]=2 , _SCREAMING_SNAKE_CASE : Union[str, Any]=0.02 , _SCREAMING_SNAKE_CASE : Tuple=4 , )-> Optional[int]:
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : Optional[int] = batch_size
lowerCAmelCase__ : List[Any] = seq_length
lowerCAmelCase__ : Any = is_training
lowerCAmelCase__ : str = use_attention_mask
lowerCAmelCase__ : Union[str, Any] = use_token_type_ids
lowerCAmelCase__ : List[Any] = use_labels
lowerCAmelCase__ : List[str] = vocab_size
lowerCAmelCase__ : Optional[Any] = hidden_size
lowerCAmelCase__ : str = num_hidden_layers
lowerCAmelCase__ : Any = num_attention_heads
lowerCAmelCase__ : List[Any] = intermediate_size
lowerCAmelCase__ : Any = hidden_act
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : int = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[int] = max_position_embeddings
lowerCAmelCase__ : List[str] = type_vocab_size
lowerCAmelCase__ : Union[str, Any] = type_sequence_label_size
lowerCAmelCase__ : List[str] = initializer_range
lowerCAmelCase__ : int = num_choices
def UpperCAmelCase__( self : List[str] )-> Optional[Any]:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Union[str, Any] = None
if self.use_attention_mask:
lowerCAmelCase__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : Any = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=_SCREAMING_SNAKE_CASE , )
return config, input_ids, attention_mask
def UpperCAmelCase__( self : Dict )-> Union[str, Any]:
lowerCAmelCase__ : List[str] = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Dict = config_and_inputs
lowerCAmelCase__ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_flax
class _a ( _lowercase , unittest.TestCase):
_a : str = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def UpperCAmelCase__( self : str )-> List[str]:
lowerCAmelCase__ : Tuple = FlaxDistilBertModelTester(self )
@slow
def UpperCAmelCase__( self : Dict )-> Tuple:
for model_class_name in self.all_model_classes:
lowerCAmelCase__ : Tuple = model_class_name.from_pretrained('''distilbert-base-uncased''' )
lowerCAmelCase__ : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@require_flax
class _a ( unittest.TestCase):
@slow
def UpperCAmelCase__( self : Optional[int] )-> List[Any]:
lowerCAmelCase__ : int = FlaxDistilBertModel.from_pretrained('''distilbert-base-uncased''' )
lowerCAmelCase__ : Tuple = np.array([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
lowerCAmelCase__ : str = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCAmelCase__ : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )[0]
lowerCAmelCase__ : str = (1, 11, 768)
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : str = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
| 131 | 1 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def _lowercase ( __snake_case ) -> Any:
__lowerCAmelCase : Union[str, Any] = os.path.join(args.tf_model_dir ,"parameters.json" )
__lowerCAmelCase : Tuple = json.loads(open(__snake_case ).read() )
if not params:
raise ValueError(
F"""It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file.""" )
if not args.output.endswith(".pt" ):
__lowerCAmelCase : Dict = args.output + ".pt"
__lowerCAmelCase : str = OrderedDict()
with tf.device("/CPU:0" ):
__lowerCAmelCase : int = tf.train.load_checkpoint(args.tf_model_dir )
__lowerCAmelCase : Dict = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
__lowerCAmelCase : Any = reader.get_tensor(__snake_case ).astype(np.floataa )
if key_name.endswith("/adam_m" ) or key_name.endswith("/adam_v" ):
continue
if key_name.startswith("pasts/" ):
if key_name.startswith("pasts/mlp" ):
__lowerCAmelCase : Optional[Any] = int(key_name[9] )
elif key_name.startswith("pasts/out" ):
__lowerCAmelCase : Tuple = 8
__lowerCAmelCase : Optional[int] = "model.sqout.%d.weight" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
__lowerCAmelCase : List[str] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__lowerCAmelCase : Tuple = torch.tensor(__snake_case )
elif key_name.startswith("model/moe" ):
__lowerCAmelCase : Union[str, Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/switch_gating/kernel" ):
__lowerCAmelCase : Tuple = "model.blocks.%d.feed_forward.mlp.router.classifier.weight" % player
__lowerCAmelCase : Tuple = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__lowerCAmelCase : Optional[Any] = torch.tensor(__snake_case )
elif key_name.endswith("/softmlp/kernel" ):
__lowerCAmelCase : Optional[int] = "model.blocks.%d.feed_forward.soft_bypass_mlp.weight" % player
__lowerCAmelCase : str = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__lowerCAmelCase : str = torch.tensor(__snake_case )
elif key_name.endswith("/wo/kernel" ) or key_name.endswith("/wi/kernel" ):
__lowerCAmelCase : Tuple = key_name[-9:-7]
for i in range(16 ):
__lowerCAmelCase : Optional[int] = "model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight" % (player, i, nlayer)
__lowerCAmelCase : List[Any] = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
__lowerCAmelCase : Dict = torch.tensor(__snake_case )
elif key_name.startswith("model/mlp" ):
__lowerCAmelCase : Optional[int] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/p1/kernel" ):
__lowerCAmelCase : Any = "model.blocks.%d.feed_forward.mlp.wi.weight" % player
__lowerCAmelCase : Optional[int] = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__lowerCAmelCase : Dict = torch.tensor(__snake_case )
elif key_name.endswith("/p1/bias" ):
__lowerCAmelCase : Tuple = "model.blocks.%d.feed_forward.mlp.wi.bias" % player
__lowerCAmelCase : Optional[Any] = vnp.copy() # same because it is one dimensional
__lowerCAmelCase : Optional[Any] = torch.tensor(__snake_case )
elif key_name.endswith("/p2/kernel" ):
__lowerCAmelCase : List[Any] = "model.blocks.%d.feed_forward.mlp.wo.weight" % player
__lowerCAmelCase : Any = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__lowerCAmelCase : Tuple = torch.tensor(__snake_case )
elif key_name.endswith("/p2/bias" ):
__lowerCAmelCase : List[Any] = "model.blocks.%d.feed_forward.mlp.wo.bias" % player
__lowerCAmelCase : Union[str, Any] = vnp.copy() # same because it is one dimensional
__lowerCAmelCase : Tuple = torch.tensor(__snake_case )
elif key_name.startswith("model/ln" ):
__lowerCAmelCase : Optional[Any] = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
__lowerCAmelCase : List[str] = "model.blocks.%d.feed_forward.norm.bias" % player
__lowerCAmelCase : List[str] = vnp.copy() # same because it is one dimensional
__lowerCAmelCase : Tuple = torch.tensor(__snake_case )
elif key_name.endswith("/g" ):
__lowerCAmelCase : Optional[int] = "model.blocks.%d.feed_forward.norm.weight" % player
__lowerCAmelCase : Optional[int] = vnp.copy() # same because it is one dimensional
__lowerCAmelCase : List[str] = torch.tensor(__snake_case )
elif key_name.startswith("model/att" ):
__lowerCAmelCase : Optional[Any] = int(key_name[9:].split("/" )[0] )
if key_name.endswith("/qkv/kernel" ):
__lowerCAmelCase : int = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
__lowerCAmelCase : str = state[:, 0, :, :]
__lowerCAmelCase : Union[str, Any] = state[:, 1, :, :]
__lowerCAmelCase : List[Any] = state[:, 2, :, :]
__lowerCAmelCase : List[Any] = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
__lowerCAmelCase : str = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
__lowerCAmelCase : int = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
__lowerCAmelCase : Dict = "model.blocks.%d.self_attn.self_attn.q_proj.weight" % player
__lowerCAmelCase : Tuple = torch.tensor(__snake_case )
__lowerCAmelCase : Union[str, Any] = "model.blocks.%d.self_attn.self_attn.k_proj.weight" % player
__lowerCAmelCase : str = torch.tensor(__snake_case )
__lowerCAmelCase : int = "model.blocks.%d.self_attn.self_attn.v_proj.weight" % player
__lowerCAmelCase : Union[str, Any] = torch.tensor(__snake_case )
elif key_name.endswith("/o/kernel" ):
__lowerCAmelCase : Tuple = "model.blocks.%d.self_attn.self_attn.out_proj.weight" % player
__lowerCAmelCase : List[str] = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
__lowerCAmelCase : str = torch.tensor(__snake_case )
elif key_name.startswith("model/an" ):
__lowerCAmelCase : Dict = int(key_name[8:].split("/" )[0] )
if key_name.endswith("/b" ):
__lowerCAmelCase : str = "model.blocks.%d.self_attn.norm.bias" % player
__lowerCAmelCase : str = vnp.copy() # same because it is one dimensional
__lowerCAmelCase : Optional[int] = torch.tensor(__snake_case )
elif key_name.endswith("/g" ):
__lowerCAmelCase : str = "model.blocks.%d.self_attn.norm.weight" % player
__lowerCAmelCase : Dict = vnp.copy() # same because it is one dimensional
__lowerCAmelCase : List[Any] = torch.tensor(__snake_case )
elif (
key_name.startswith("model/wte" )
or key_name.startswith("model/wpe" )
or key_name.startswith("model/ete" )
):
__lowerCAmelCase : Optional[Any] = {"wte": "embed_tokens", "wpe": "position_embeddings", "ete": "extra_position_embeddings"}[
key_name[-3:]
]
__lowerCAmelCase : Dict = "model.%s.weight" % nlayer
__lowerCAmelCase : List[Any] = vnp.copy() # same in embedded
__lowerCAmelCase : Tuple = torch.tensor(__snake_case )
if key_name.startswith("model/wte" ):
__lowerCAmelCase : Any = "lm_head.weight"
__lowerCAmelCase : Optional[int] = vnp.copy() # same in embedded
__lowerCAmelCase : str = torch.tensor(__snake_case )
elif key_name.startswith("model/wob" ):
__lowerCAmelCase : Any = "final_logits_bias"
__lowerCAmelCase : Union[str, Any] = vnp.copy() # same in embedded
__lowerCAmelCase : Dict = state.reshape((1, -1) )
__lowerCAmelCase : int = torch.tensor(__snake_case )
elif key_name == "model/dense/kernel":
__lowerCAmelCase : Optional[int] = "model.last_project.weight"
__lowerCAmelCase : Dict = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
__lowerCAmelCase : List[Any] = torch.tensor(__snake_case )
elif key_name == "model/dense_1/bias":
__lowerCAmelCase : Optional[int] = "model.last_project.bias"
__lowerCAmelCase : Tuple = vnp.copy() # same because it is one dimensional
__lowerCAmelCase : Tuple = torch.tensor(__snake_case )
torch.save(__snake_case ,args.output )
if __name__ == "__main__":
__snake_case : Optional[int] = argparse.ArgumentParser(
description='model converter.', formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--tf_model_dir', metavar='PATH', type=str, required=True, help='import model')
parser.add_argument('--output', metavar='PATH', type=str, required=True, help='output model')
__snake_case : Optional[Any] = parser.parse_args()
convert_tf_gptsan_to_pt(args) | 370 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_video_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import VivitImageProcessor
class A__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self: str , _SCREAMING_SNAKE_CASE: Any , _SCREAMING_SNAKE_CASE: List[Any]=7 , _SCREAMING_SNAKE_CASE: Optional[Any]=3 , _SCREAMING_SNAKE_CASE: int=10 , _SCREAMING_SNAKE_CASE: Tuple=18 , _SCREAMING_SNAKE_CASE: Union[str, Any]=30 , _SCREAMING_SNAKE_CASE: Any=400 , _SCREAMING_SNAKE_CASE: List[str]=True , _SCREAMING_SNAKE_CASE: Union[str, Any]=None , _SCREAMING_SNAKE_CASE: str=True , _SCREAMING_SNAKE_CASE: Union[str, Any]=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE: Any=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE: Dict=None , ) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = size if size is not None else {"shortest_edge": 18}
__lowerCAmelCase : int = crop_size if crop_size is not None else {"height": 18, "width": 18}
__lowerCAmelCase : Tuple = parent
__lowerCAmelCase : List[Any] = batch_size
__lowerCAmelCase : List[str] = num_channels
__lowerCAmelCase : int = num_frames
__lowerCAmelCase : Union[str, Any] = image_size
__lowerCAmelCase : Tuple = min_resolution
__lowerCAmelCase : Tuple = max_resolution
__lowerCAmelCase : str = do_resize
__lowerCAmelCase : Optional[int] = size
__lowerCAmelCase : Optional[int] = do_normalize
__lowerCAmelCase : Dict = image_mean
__lowerCAmelCase : List[Any] = image_std
__lowerCAmelCase : List[Any] = crop_size
def _SCREAMING_SNAKE_CASE ( self: int) -> Union[str, Any]:
"""simple docstring"""
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class A__ ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = VivitImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = VivitImageProcessingTester(self)
@property
def _SCREAMING_SNAKE_CASE ( self: int) -> Tuple:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Optional[int]:
"""simple docstring"""
__lowerCAmelCase : int = self.image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "image_mean"))
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "image_std"))
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_normalize"))
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_resize"))
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "do_center_crop"))
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , "size"))
def _SCREAMING_SNAKE_CASE ( self: Any) -> Optional[Any]:
"""simple docstring"""
__lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict)
self.assertEqual(image_processor.size , {"shortest_edge": 18})
self.assertEqual(image_processor.crop_size , {"height": 18, "width": 18})
__lowerCAmelCase : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84)
self.assertEqual(image_processor.size , {"shortest_edge": 42})
self.assertEqual(image_processor.crop_size , {"height": 84, "width": 84})
def _SCREAMING_SNAKE_CASE ( self: int) -> Union[str, Any]:
"""simple docstring"""
__lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict)
# create random PIL videos
__lowerCAmelCase : Dict = prepare_video_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE)
for video in video_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
self.assertIsInstance(video[0] , Image.Image)
# Test not batched input
__lowerCAmelCase : Any = image_processing(video_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCAmelCase : str = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> int:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = self.image_processing_class(**self.image_processor_dict)
# create random numpy tensors
__lowerCAmelCase : Optional[int] = prepare_video_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE)
for video in video_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
self.assertIsInstance(video[0] , np.ndarray)
# Test not batched input
__lowerCAmelCase : Any = image_processing(video_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCAmelCase : List[str] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
def _SCREAMING_SNAKE_CASE ( self: Dict) -> int:
"""simple docstring"""
__lowerCAmelCase : str = self.image_processing_class(**self.image_processor_dict)
# create random PyTorch tensors
__lowerCAmelCase : Optional[int] = prepare_video_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE)
for video in video_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE)
self.assertIsInstance(video[0] , torch.Tensor)
# Test not batched input
__lowerCAmelCase : List[str] = image_processing(video_inputs[0] , return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
1,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , )
# Test batched
__lowerCAmelCase : Any = image_processing(_SCREAMING_SNAKE_CASE , return_tensors="pt").pixel_values
self.assertEqual(
encoded_videos.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_frames,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
) , ) | 58 | 0 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
lowerCAmelCase__ = TypeVar('''T''')
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
return (position - 1) // 2
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
return (2 * position) + 1
def __lowerCamelCase ( lowerCamelCase__ ):
"""simple docstring"""
return (2 * position) + 2
class snake_case__(Generic[T] ):
"""simple docstring"""
def __init__( self : List[str] ):
lowercase__ : list[tuple[T, int]] = []
lowercase__ : dict[T, int] = {}
lowercase__ : int = 0
def __len__( self : Optional[Any] ):
return self.elements
def __repr__( self : int ):
return str(self.heap )
def snake_case ( self : Tuple ):
# Check if the priority queue is empty
return self.elements == 0
def snake_case ( self : Dict , SCREAMING_SNAKE_CASE : T , SCREAMING_SNAKE_CASE : int ):
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
lowercase__ : int = self.elements
self.elements += 1
self._bubble_up(SCREAMING_SNAKE_CASE )
def snake_case ( self : List[Any] ):
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
lowercase__ , lowercase__ : Any = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
lowercase__ , lowercase__ : Tuple = self.heap[0]
self._bubble_down(SCREAMING_SNAKE_CASE )
return elem
def snake_case ( self : int , SCREAMING_SNAKE_CASE : T , SCREAMING_SNAKE_CASE : int ):
# Update the weight of the given key
lowercase__ : str = self.position_map[elem]
lowercase__ : Any = (elem, weight)
if position > 0:
lowercase__ : Optional[int] = get_parent_position(SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ : int = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(SCREAMING_SNAKE_CASE )
else:
self._bubble_down(SCREAMING_SNAKE_CASE )
else:
self._bubble_down(SCREAMING_SNAKE_CASE )
def snake_case ( self : Any , SCREAMING_SNAKE_CASE : T ):
# Place a node at the proper position (upward movement) [to be used internally
# only]
lowercase__ : Optional[Any] = self.position_map[elem]
if curr_pos == 0:
return None
lowercase__ : List[str] = get_parent_position(SCREAMING_SNAKE_CASE )
lowercase__ , lowercase__ : Optional[int] = self.heap[curr_pos]
lowercase__ , lowercase__ : int = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return self._bubble_up(SCREAMING_SNAKE_CASE )
return None
def snake_case ( self : Union[str, Any] , SCREAMING_SNAKE_CASE : T ):
# Place a node at the proper position (downward movement) [to be used
# internally only]
lowercase__ : int = self.position_map[elem]
lowercase__ , lowercase__ : Tuple = self.heap[curr_pos]
lowercase__ : Any = get_child_left_position(SCREAMING_SNAKE_CASE )
lowercase__ : Tuple = get_child_right_position(SCREAMING_SNAKE_CASE )
if child_left_position < self.elements and child_right_position < self.elements:
lowercase__ , lowercase__ : Optional[int] = self.heap[child_left_position]
lowercase__ , lowercase__ : Tuple = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return self._bubble_down(SCREAMING_SNAKE_CASE )
if child_left_position < self.elements:
lowercase__ , lowercase__ : int = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return self._bubble_down(SCREAMING_SNAKE_CASE )
else:
return None
if child_right_position < self.elements:
lowercase__ , lowercase__ : Tuple = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
return self._bubble_down(SCREAMING_SNAKE_CASE )
return None
def snake_case ( self : int , SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : int ):
# Swap the nodes at the given positions
lowercase__ : Dict = self.heap[nodea_pos][0]
lowercase__ : Dict = self.heap[nodea_pos][0]
lowercase__ , lowercase__ : int = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
lowercase__ : List[str] = nodea_pos
lowercase__ : Optional[int] = nodea_pos
class snake_case__(Generic[T] ):
"""simple docstring"""
def __init__( self : Optional[Any] ):
lowercase__ : dict[T, dict[T, int]] = {}
lowercase__ : int = 0
def __repr__( self : List[Any] ):
return str(self.connections )
def __len__( self : Union[str, Any] ):
return self.nodes
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : T ):
# Add a node in the graph if it is not in the graph
if node not in self.connections:
lowercase__ : List[Any] = {}
self.nodes += 1
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : T , SCREAMING_SNAKE_CASE : T , SCREAMING_SNAKE_CASE : int ):
# Add an edge between 2 nodes in the graph
self.add_node(SCREAMING_SNAKE_CASE )
self.add_node(SCREAMING_SNAKE_CASE )
lowercase__ : List[Any] = weight
lowercase__ : str = weight
def __lowerCamelCase ( lowerCamelCase__ , ):
"""simple docstring"""
lowercase__ : dict[T, int] = {node: maxsize for node in graph.connections}
lowercase__ : dict[T, T | None] = {node: None for node in graph.connections}
lowercase__ : MinPriorityQueue[T] = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(lowerCamelCase__ , lowerCamelCase__ )
if priority_queue.is_empty():
return dist, parent
# initialization
lowercase__ : List[Any] = priority_queue.extract_min()
lowercase__ : Tuple = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowercase__ : List[str] = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCamelCase__ , dist[neighbour] )
lowercase__ : str = node
# running prim's algorithm
while not priority_queue.is_empty():
lowercase__ : Union[str, Any] = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
lowercase__ : str = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(lowerCamelCase__ , dist[neighbour] )
lowercase__ : int = node
return dist, parent
| 130 |
# flake8: noqa
# Lint as: python3
lowerCAmelCase__ = [
'''VerificationMode''',
'''Version''',
'''disable_progress_bar''',
'''enable_progress_bar''',
'''is_progress_bar_enabled''',
'''experimental''',
]
from .info_utils import VerificationMode
from .logging import disable_progress_bar, enable_progress_bar, is_progress_bar_enabled
from .version import Version
from .experimental import experimental
| 130 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_SCREAMING_SNAKE_CASE = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ["""WhisperTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 355 | from __future__ import annotations
def lowercase( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) -> tuple[float, list[float]]:
'''simple docstring'''
UpperCamelCase = list(range(len(UpperCamelCase_ ) ) )
UpperCamelCase = [v / w for v, w in zip(UpperCamelCase_ , UpperCamelCase_ )]
index.sort(key=lambda UpperCamelCase_ : ratio[i] , reverse=UpperCamelCase_ )
UpperCamelCase = 0
UpperCamelCase = [0] * len(UpperCamelCase_ )
for i in index:
if weight[i] <= capacity:
UpperCamelCase = 1
max_value += value[i]
capacity -= weight[i]
else:
UpperCamelCase = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
| 165 | 0 |
"""simple docstring"""
def lowercase ( a__ : int ) -> int:
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(a__ , a__ ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(a__ ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 256 | """simple docstring"""
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from tax import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
UpperCAmelCase = """base_with_context"""
def lowercase ( a__ : Optional[Any] , a__ : Optional[int] ) -> int:
_UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''token_embedder''']['''embedding'''] ) )
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=a__ )
for lyr_num, lyr in enumerate(model.encoders ):
_UpperCamelCase = weights[F'''layers_{lyr_num}''']
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
_UpperCamelCase = ly_weight['''attention''']
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def lowercase ( a__ : List[Any] , a__ : Dict ) -> Optional[Any]:
_UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''input_proj''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=a__ )
for lyr_num, lyr in enumerate(model.encoders ):
_UpperCamelCase = weights[F'''layers_{lyr_num}''']
_UpperCamelCase = ly_weight['''attention''']
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_attention_layer_norm''']['''scale'''] ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''encoder_norm''']['''scale'''] ) )
return model
def lowercase ( a__ : List[Any] , a__ : Union[str, Any] ) -> str:
_UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense0''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''time_emb_dense1''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights['''Embed_0''']['''embedding'''] ) , requires_grad=a__ )
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(weights['''continuous_inputs_projection''']['''kernel'''].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
_UpperCamelCase = weights[F'''layers_{lyr_num}''']
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_self_attention_layer_norm''']['''scale'''] ) )
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_0''']['''DenseGeneral_0''']['''kernel'''].T ) )
_UpperCamelCase = ly_weight['''self_attention''']
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
_UpperCamelCase = ly_weight['''MultiHeadDotProductAttention_0''']
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''query''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''key''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''value''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(attention_weights['''out''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''pre_cross_attention_layer_norm''']['''scale'''] ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''pre_mlp_layer_norm''']['''scale'''] ) )
_UpperCamelCase = nn.Parameter(
torch.FloatTensor(ly_weight['''FiLMLayer_1''']['''DenseGeneral_0''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_0''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wi_1''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(ly_weight['''mlp''']['''wo''']['''kernel'''].T ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''decoder_norm''']['''scale'''] ) )
_UpperCamelCase = nn.Parameter(torch.FloatTensor(weights['''spec_out_dense''']['''kernel'''].T ) )
return model
def lowercase ( a__ : Union[str, Any] ) -> int:
_UpperCamelCase = checkpoints.load_tax_checkpoint(args.checkpoint_path )
_UpperCamelCase = jnp.tree_util.tree_map(onp.array , a__ )
_UpperCamelCase = [
'''from __gin__ import dynamic_registration''',
'''from music_spectrogram_diffusion.models.diffusion import diffusion_utils''',
'''diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0''',
'''diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()''',
]
_UpperCamelCase = os.path.join(args.checkpoint_path , '''..''' , '''config.gin''' )
_UpperCamelCase = inference.parse_training_gin_file(a__ , a__ )
_UpperCamelCase = inference.InferenceModel(args.checkpoint_path , a__ )
_UpperCamelCase = DDPMScheduler(beta_schedule='''squaredcos_cap_v2''' , variance_type='''fixed_large''' )
_UpperCamelCase = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['''inputs'''] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
_UpperCamelCase = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['''targets_context'''] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='''gated-gelu''' , )
_UpperCamelCase = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['''targets_context'''] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
_UpperCamelCase = load_notes_encoder(ta_checkpoint['''target''']['''token_encoder'''] , a__ )
_UpperCamelCase = load_continuous_encoder(ta_checkpoint['''target''']['''continuous_encoder'''] , a__ )
_UpperCamelCase = load_decoder(ta_checkpoint['''target''']['''decoder'''] , a__ )
_UpperCamelCase = OnnxRuntimeModel.from_pretrained('''kashif/soundstream_mel_decoder''' )
_UpperCamelCase = SpectrogramDiffusionPipeline(
notes_encoder=a__ , continuous_encoder=a__ , decoder=a__ , scheduler=a__ , melgan=a__ , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--output_path""", default=None, type=str, required=True, help="""Path to the converted model.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument(
"""--checkpoint_path""",
default=F'''{MODEL}/checkpoint_500000''',
type=str,
required=False,
help="""Path to the original jax model checkpoint.""",
)
UpperCAmelCase = parser.parse_args()
main(args)
| 256 | 1 |
'''simple docstring'''
import argparse
import torch
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_from_original_stable_diffusion_ckpt
if __name__ == "__main__":
lowerCAmelCase: Tuple = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
# !wget https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml
parser.add_argument(
'--original_config_file',
default=None,
type=str,
help='The YAML config file corresponding to the original architecture.',
)
parser.add_argument(
'--num_in_channels',
default=None,
type=int,
help='The number of input channels. If `None` number of input channels will be automatically inferred.',
)
parser.add_argument(
'--scheduler_type',
default='pndm',
type=str,
help='Type of scheduler to use. Should be one of [\'pndm\', \'lms\', \'ddim\', \'euler\', \'euler-ancestral\', \'dpm\']',
)
parser.add_argument(
'--pipeline_type',
default=None,
type=str,
help=(
'The pipeline type. One of \'FrozenOpenCLIPEmbedder\', \'FrozenCLIPEmbedder\', \'PaintByExample\''
'. If `None` pipeline will be automatically inferred.'
),
)
parser.add_argument(
'--image_size',
default=None,
type=int,
help=(
'The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2'
' Base. Use 768 for Stable Diffusion v2.'
),
)
parser.add_argument(
'--prediction_type',
default=None,
type=str,
help=(
'The prediction type that the model was trained on. Use \'epsilon\' for Stable Diffusion v1.X and Stable'
' Diffusion v2 Base. Use \'v_prediction\' for Stable Diffusion v2.'
),
)
parser.add_argument(
'--extract_ema',
action='store_true',
help=(
'Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'
' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'
' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'
),
)
parser.add_argument(
'--upcast_attention',
action='store_true',
help=(
'Whether the attention computation should always be upcasted. This is necessary when running stable'
' diffusion 2.1.'
),
)
parser.add_argument(
'--from_safetensors',
action='store_true',
help='If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.',
)
parser.add_argument(
'--to_safetensors',
action='store_true',
help='Whether to store pipeline in safetensors format or not.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
parser.add_argument('--device', type=str, help='Device to use (e.g. cpu, cuda:0, cuda:1, etc.)')
parser.add_argument(
'--stable_unclip',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP model. One of \'txt2img\' or \'img2img\'.',
)
parser.add_argument(
'--stable_unclip_prior',
type=str,
default=None,
required=False,
help='Set if this is a stable unCLIP txt2img model. Selects which prior to use. If `--stable_unclip` is set to `txt2img`, the karlo prior (https://huggingface.co/kakaobrain/karlo-v1-alpha/tree/main/prior) is selected by default.',
)
parser.add_argument(
'--clip_stats_path',
type=str,
help='Path to the clip stats file. Only required if the stable unclip model\'s config specifies `model.params.noise_aug_config.params.clip_stats_path`.',
required=False,
)
parser.add_argument(
'--controlnet', action='store_true', default=None, help='Set flag if this is a controlnet checkpoint.'
)
parser.add_argument('--half', action='store_true', help='Save weights in half precision.')
parser.add_argument(
'--vae_path',
type=str,
default=None,
required=False,
help='Set to a path, hub id to an already converted vae to not convert it again.',
)
lowerCAmelCase: Dict = parser.parse_args()
lowerCAmelCase: str = download_from_original_stable_diffusion_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
prediction_type=args.prediction_type,
model_type=args.pipeline_type,
extract_ema=args.extract_ema,
scheduler_type=args.scheduler_type,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
stable_unclip=args.stable_unclip,
stable_unclip_prior=args.stable_unclip_prior,
clip_stats_path=args.clip_stats_path,
controlnet=args.controlnet,
vae_path=args.vae_path,
)
if args.half:
pipe.to(torch_dtype=torch.floataa)
if args.controlnet:
# only save the controlnet model
pipe.controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
else:
pipe.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors) | 352 |
'''simple docstring'''
import re
from typing import Callable, List, Optional, Union
import tensorflow as tf
try:
from tensorflow.keras.optimizers.legacy import Adam
except ImportError:
from tensorflow.keras.optimizers import Adam
class a__( tf.keras.optimizers.schedules.LearningRateSchedule ):
def __init__( self : Optional[Any] , __snake_case : float , __snake_case : Callable , __snake_case : int , __snake_case : float = 1.0 , __snake_case : str = None , ):
super().__init__()
a : Dict = initial_learning_rate
a : Optional[int] = warmup_steps
a : Tuple = power
a : str = decay_schedule_fn
a : Optional[int] = name
def __call__( self : List[Any] , __snake_case : int ):
with tf.name_scope(self.name or 'WarmUp' ) as name:
# Implements polynomial warmup. i.e., if global_step < warmup_steps, the
# learning rate will be `global_step/num_warmup_steps * init_lr`.
a : Optional[int] = tf.cast(__snake_case , tf.floataa )
a : Tuple = tf.cast(self.warmup_steps , tf.floataa )
a : str = global_step_float / warmup_steps_float
a : Union[str, Any] = self.initial_learning_rate * tf.math.pow(__snake_case , self.power )
return tf.cond(
global_step_float < warmup_steps_float , lambda: warmup_learning_rate , lambda: self.decay_schedule_fn(step - self.warmup_steps ) , name=__snake_case , )
def lowercase_ ( self : Dict ):
return {
"initial_learning_rate": self.initial_learning_rate,
"decay_schedule_fn": self.decay_schedule_fn,
"warmup_steps": self.warmup_steps,
"power": self.power,
"name": self.name,
}
def lowerCamelCase__ ( _A , _A , _A , _A = 0.0 , _A = 0.9 , _A = 0.999 , _A = 1E-8 , _A = None , _A = None , _A = 0.0 , _A = 1.0 , _A = None , ):
a : Optional[int] = tf.keras.optimizers.schedules.PolynomialDecay(
initial_learning_rate=_A , decay_steps=num_train_steps - num_warmup_steps , end_learning_rate=init_lr * min_lr_ratio , power=_A , )
if num_warmup_steps:
a : Tuple = WarmUp(
initial_learning_rate=_A , decay_schedule_fn=_A , warmup_steps=_A , )
if weight_decay_rate > 0.0:
a : List[Any] = AdamWeightDecay(
learning_rate=_A , weight_decay_rate=_A , beta_a=_A , beta_a=_A , epsilon=_A , clipnorm=_A , global_clipnorm=_A , exclude_from_weight_decay=['LayerNorm', 'layer_norm', 'bias'] , include_in_weight_decay=_A , )
else:
a : Tuple = tf.keras.optimizers.Adam(
learning_rate=_A , beta_a=_A , beta_a=_A , epsilon=_A , clipnorm=_A , global_clipnorm=_A , )
# We return the optimizer and the LR scheduler in order to better track the
# evolution of the LR independently of the optimizer.
return optimizer, lr_schedule
class a__( lowerCamelCase__ ):
def __init__( self : Union[str, Any] , __snake_case : Union[float, tf.keras.optimizers.schedules.LearningRateSchedule] = 0.001 , __snake_case : float = 0.9 , __snake_case : float = 0.999 , __snake_case : float = 1e-7 , __snake_case : bool = False , __snake_case : float = 0.0 , __snake_case : Optional[List[str]] = None , __snake_case : Optional[List[str]] = None , __snake_case : str = "AdamWeightDecay" , **__snake_case : Optional[int] , ):
super().__init__(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case , **__snake_case )
a : Tuple = weight_decay_rate
a : Optional[Any] = include_in_weight_decay
a : Any = exclude_from_weight_decay
@classmethod
def lowercase_ ( cls : Any , __snake_case : Optional[Any] ):
a : Any = {'WarmUp': WarmUp}
return super(__snake_case , cls ).from_config(__snake_case , custom_objects=__snake_case )
def lowercase_ ( self : str , __snake_case : Optional[int] , __snake_case : Dict , __snake_case : Optional[int] ):
super(__snake_case , self )._prepare_local(__snake_case , __snake_case , __snake_case )
a : List[str] = tf.constant(
self.weight_decay_rate , name='adam_weight_decay_rate' )
def lowercase_ ( self : int , __snake_case : Dict , __snake_case : List[Any] , __snake_case : str ):
a : Optional[Any] = self._do_use_weight_decay(var.name )
if do_decay:
return var.assign_sub(
learning_rate * var * apply_state[(var.device, var.dtype.base_dtype)]['weight_decay_rate'] , use_locking=self._use_locking , )
return tf.no_op()
def lowercase_ ( self : List[str] , __snake_case : List[str] , __snake_case : Any=None , **__snake_case : Tuple ):
a , a : Dict = list(zip(*__snake_case ) )
return super(__snake_case , self ).apply_gradients(zip(__snake_case , __snake_case ) , name=__snake_case , **__snake_case )
def lowercase_ ( self : List[Any] , __snake_case : List[str] , __snake_case : Union[str, Any] , __snake_case : Optional[int] ):
if apply_state is None:
return self._decayed_lr_t[var_dtype], {}
a : List[Any] = apply_state or {}
a : Optional[Any] = apply_state.get((var_device, var_dtype) )
if coefficients is None:
a : int = self._fallback_apply_state(__snake_case , __snake_case )
a : Any = coefficients
return coefficients["lr_t"], {"apply_state": apply_state}
def lowercase_ ( self : Dict , __snake_case : Union[str, Any] , __snake_case : Dict , __snake_case : Tuple=None ):
a , a : List[Any] = self._get_lr(var.device , var.dtype.base_dtype , __snake_case )
a : List[Any] = self._decay_weights_op(__snake_case , __snake_case , __snake_case )
with tf.control_dependencies([decay] ):
return super(__snake_case , self )._resource_apply_dense(__snake_case , __snake_case , **__snake_case )
def lowercase_ ( self : Any , __snake_case : Optional[Any] , __snake_case : Optional[Any] , __snake_case : List[str] , __snake_case : int=None ):
a , a : Tuple = self._get_lr(var.device , var.dtype.base_dtype , __snake_case )
a : int = self._decay_weights_op(__snake_case , __snake_case , __snake_case )
with tf.control_dependencies([decay] ):
return super(__snake_case , self )._resource_apply_sparse(__snake_case , __snake_case , __snake_case , **__snake_case )
def lowercase_ ( self : Optional[Any] ):
a : Union[str, Any] = super().get_config()
config.update({'weight_decay_rate': self.weight_decay_rate} )
return config
def lowercase_ ( self : List[str] , __snake_case : Dict ):
if self.weight_decay_rate == 0:
return False
if self._include_in_weight_decay:
for r in self._include_in_weight_decay:
if re.search(__snake_case , __snake_case ) is not None:
return True
if self._exclude_from_weight_decay:
for r in self._exclude_from_weight_decay:
if re.search(__snake_case , __snake_case ) is not None:
return False
return True
class a__( lowerCamelCase__ ):
def __init__( self : List[str] ):
a : List[Any] = []
a : Optional[int] = None
@property
def lowercase_ ( self : Any ):
if self._accum_steps is None:
a : List[Any] = tf.Variable(
tf.constant(0 , dtype=tf.intaa ) , trainable=__snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
return self._accum_steps.value()
@property
def lowercase_ ( self : Optional[int] ):
if not self._gradients:
raise ValueError('The accumulator should be called first to initialize the gradients' )
return [gradient.value() if gradient is not None else gradient for gradient in self._gradients]
def __call__( self : List[Any] , __snake_case : Any ):
if not self._gradients:
a : Union[str, Any] = self.step # Create the step variable.
self._gradients.extend(
[
tf.Variable(
tf.zeros_like(__snake_case ) , trainable=__snake_case , synchronization=tf.VariableSynchronization.ON_READ , aggregation=tf.VariableAggregation.ONLY_FIRST_REPLICA , )
if gradient is not None
else gradient
for gradient in gradients
] )
if len(__snake_case ) != len(self._gradients ):
raise ValueError(F"""Expected {len(self._gradients )} gradients, but got {len(__snake_case )}""" )
for accum_gradient, gradient in zip(self._gradients , __snake_case ):
if accum_gradient is not None and gradient is not None:
accum_gradient.assign_add(__snake_case )
self._accum_steps.assign_add(1 )
def lowercase_ ( self : Tuple ):
if not self._gradients:
return
self._accum_steps.assign(0 )
for gradient in self._gradients:
if gradient is not None:
gradient.assign(tf.zeros_like(__snake_case ) ) | 96 | 0 |
"""simple docstring"""
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
_lowercase = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = XLMProphetNetTokenizer
_lowerCamelCase: Optional[Any] = False
_lowerCamelCase: Union[str, Any] = True
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
super().setUp()
# We have a SentencePiece fixture for testing
A = XLMProphetNetTokenizer(A_ ,keep_accents=A_ )
tokenizer.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
A = '[PAD]'
A = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(A_ ) ,A_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(A_ ) ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
A = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'[PAD]' )
self.assertEqual(vocab_keys[1] ,'[CLS]' )
self.assertEqual(vocab_keys[-1] ,'j' )
self.assertEqual(len(A_ ) ,1012 )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
self.assertEqual(self.get_tokenizer().vocab_size ,1012 )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
A = XLMProphetNetTokenizer(A_ ,keep_accents=A_ )
A = tokenizer.tokenize('This is a test' )
self.assertListEqual(A_ ,['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(A_ ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
A = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
A_ ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] ,)
A = tokenizer.convert_tokens_to_ids(A_ )
self.assertListEqual(
A_ ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] ,)
A = tokenizer.convert_ids_to_tokens(A_ )
self.assertListEqual(
A_ ,[
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] ,)
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def _SCREAMING_SNAKE_CASE ( self : str ) -> Any:
A = 'Hello World!'
A = [3_5389, 6672, 49, 2]
self.assertListEqual(A_ ,self.big_tokenizer.encode(A_ ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
# fmt: off
A = {'input_ids': [[1_1073, 8_2783, 18, 26, 8_2783, 549, 5_1540, 248, 1_7209, 1301, 217, 20, 21_5186, 1325, 147, 1_7209, 1301, 217, 20, 5_6370, 53, 12_2020, 20, 1_6477, 27, 8_7355, 4548, 20, 4728, 7_8392, 17, 15_9969, 18, 26, 2_4491, 629, 15, 538, 2_2704, 5439, 15, 2788, 2_4491, 9885, 15, 4_3534, 605, 15, 814, 1_8403, 3_3200, 29, 15, 4_3534, 2_4458, 1_2410, 111, 2_4966, 8_3669, 9637, 14_4068, 26, 850, 2_2346, 27, 147, 2_4966, 8_3669, 8_3490, 26, 3_9113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 12_2020, 11_5785, 34, 816, 1339, 4_6887, 18, 147, 5_3905, 1951, 4_2238, 4_1170, 1_7732, 834, 436, 15, 2_7523, 9_8733, 217, 147, 5542, 4981, 930, 1_7347, 16, 2], [2_0091, 629, 94, 8_2786, 58, 490, 20, 1528, 84, 5_3905, 344, 8_0592, 11_0128, 1_8822, 5267, 1306, 62, 15_2537, 308, 7997, 401, 12_4427, 549, 3_5442, 225, 109, 1_5055, 2_5748, 147, 7119, 4_3712, 34, 767, 13_5366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 6_3784, 11_9466, 17, 14_7808, 8_8214, 18, 656, 81, 32, 3296, 1_0280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A_ ,model_name='microsoft/xprophetnet-large-wiki100-cased' ,revision='1acad1643ddd54a44df6a1b797ada8373685d90e' ,) | 74 |
"""simple docstring"""
import os
import re
import warnings
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_ta import TaTokenizer
else:
A = None
A = logging.get_logger(__name__)
A = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
A = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/tokenizer.json''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/tokenizer.json''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/tokenizer.json''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/tokenizer.json''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/tokenizer.json''',
},
}
# TODO(PVP) - this should be removed in Transformers v5
A = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
class __lowercase ( _UpperCamelCase ):
'''simple docstring'''
__lowerCAmelCase = VOCAB_FILES_NAMES
__lowerCAmelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase = ['''input_ids''', '''attention_mask''']
__lowerCAmelCase = TaTokenizer
__lowerCAmelCase = []
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase="</s>" , _UpperCAmelCase="<unk>" , _UpperCAmelCase="<pad>" , _UpperCAmelCase=100 , _UpperCAmelCase=None , **_UpperCAmelCase , ):
# Add extra_ids to the special token list
if extra_ids > 0 and additional_special_tokens is None:
__a : Dict = [f"""<extra_id_{i}>""" for i in range(_UpperCAmelCase )]
elif extra_ids > 0 and additional_special_tokens is not None:
# Check that we have the right number of extra special tokens
__a : Union[str, Any] = len(set(filter(lambda _UpperCAmelCase : bool('''extra_id_''' in str(_UpperCAmelCase ) ) , _UpperCAmelCase ) ) )
if extra_tokens != extra_ids:
raise ValueError(
f"""Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"""
''' provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids'''
''' tokens''' )
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , eos_token=_UpperCAmelCase , unk_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , extra_ids=_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__a : Union[str, Any] = vocab_file
__a : int = False if not self.vocab_file else True
__a : List[str] = extra_ids
@staticmethod
def _lowerCamelCase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
if pretrained_model_name_or_path in TaTokenizerFast.max_model_input_sizes:
__a : Any = TaTokenizerFast.max_model_input_sizes[pretrained_model_name_or_path]
if init_max_model_length is not None and init_max_model_length != max_model_length:
return init_max_model_length
elif init_max_model_length is None:
warnings.warn(
'''This tokenizer was incorrectly instantiated with a model max length of'''
f""" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"""
''' behavior is kept to avoid breaking backwards compatibility when padding/encoding with'''
''' `truncation is True`.\n- Be aware that you SHOULD NOT rely on'''
f""" {pretrained_model_name_or_path} automatically truncating your input to"""
f""" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"""
f""" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"""
''' `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please'''
''' instantiate this tokenizer with `model_max_length` set to your preferred value.''' , _UpperCAmelCase , )
return max_model_length
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
'''tokenizer.''' )
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__a : Optional[Any] = os.path.join(
_UpperCAmelCase , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ):
copyfile(self.vocab_file , _UpperCAmelCase )
logger.info(f"""Copy vocab file to {out_vocab_file}""" )
return (out_vocab_file,)
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : str = token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return self.prefix_tokens + token_ids_a
else:
__a : List[str] = token_ids_a + [self.eos_token_id]
return self.prefix_tokens + token_ids_a + token_ids_a
def _lowerCamelCase ( self , _UpperCAmelCase , _UpperCAmelCase = None ):
__a : Tuple = [self.eos_token_id]
if token_ids_a is None:
return len(token_ids_a + eos ) * [0]
return len(token_ids_a + eos + token_ids_a + eos ) * [0]
def _lowerCamelCase ( self ):
return list(
set(filter(lambda _UpperCAmelCase : bool(re.search(R'''<extra_id_\d+>''' , _UpperCAmelCase ) ) is not None , self.additional_special_tokens ) ) )
def _lowerCamelCase ( self ):
return [self.convert_tokens_to_ids(_UpperCAmelCase ) for token in self.get_sentinel_tokens()] | 160 | 0 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class __UpperCAmelCase ( logging.LoggerAdapter ):
'''simple docstring'''
@staticmethod
def A (_lowerCAmelCase : Optional[int] ):
A = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def A (self : Any , _lowerCAmelCase : Dict , _lowerCAmelCase : Tuple , *_lowerCAmelCase : int , **_lowerCAmelCase : int ):
if PartialState._shared_state == {}:
raise RuntimeError(
"""You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.""" )
A = kwargs.pop("""main_process_only""" , _lowerCAmelCase )
A = kwargs.pop("""in_order""" , _lowerCAmelCase )
if self.isEnabledFor(_lowerCAmelCase ):
if self._should_log(_lowerCAmelCase ):
A , A = self.process(_lowerCAmelCase , _lowerCAmelCase )
self.logger.log(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
elif in_order:
A = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
A , A = self.process(_lowerCAmelCase , _lowerCAmelCase )
self.logger.log(_lowerCAmelCase , _lowerCAmelCase , *_lowerCAmelCase , **_lowerCAmelCase )
state.wait_for_everyone()
def __a ( UpperCAmelCase , UpperCAmelCase = None ) ->Any:
"""simple docstring"""
if log_level is None:
A = os.environ.get("""ACCELERATE_LOG_LEVEL""" , UpperCAmelCase )
A = logging.getLogger(UpperCAmelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(UpperCAmelCase , {} )
| 337 |
'''simple docstring'''
from collections import OrderedDict
from ...utils import logging
from .auto_factory import _BaseAutoModelClass, _LazyAutoMapping, auto_class_update
from .configuration_auto import CONFIG_MAPPING_NAMES
_lowerCamelCase : List[str] = logging.get_logger(__name__)
_lowerCamelCase : Any = OrderedDict(
[
# Base model mapping
('albert', 'FlaxAlbertModel'),
('bart', 'FlaxBartModel'),
('beit', 'FlaxBeitModel'),
('bert', 'FlaxBertModel'),
('big_bird', 'FlaxBigBirdModel'),
('blenderbot', 'FlaxBlenderbotModel'),
('blenderbot-small', 'FlaxBlenderbotSmallModel'),
('clip', 'FlaxCLIPModel'),
('distilbert', 'FlaxDistilBertModel'),
('electra', 'FlaxElectraModel'),
('gpt-sw3', 'FlaxGPT2Model'),
('gpt2', 'FlaxGPT2Model'),
('gpt_neo', 'FlaxGPTNeoModel'),
('gptj', 'FlaxGPTJModel'),
('longt5', 'FlaxLongT5Model'),
('marian', 'FlaxMarianModel'),
('mbart', 'FlaxMBartModel'),
('mt5', 'FlaxMT5Model'),
('opt', 'FlaxOPTModel'),
('pegasus', 'FlaxPegasusModel'),
('regnet', 'FlaxRegNetModel'),
('resnet', 'FlaxResNetModel'),
('roberta', 'FlaxRobertaModel'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormModel'),
('roformer', 'FlaxRoFormerModel'),
('t5', 'FlaxT5Model'),
('vision-text-dual-encoder', 'FlaxVisionTextDualEncoderModel'),
('vit', 'FlaxViTModel'),
('wav2vec2', 'FlaxWav2Vec2Model'),
('whisper', 'FlaxWhisperModel'),
('xglm', 'FlaxXGLMModel'),
('xlm-roberta', 'FlaxXLMRobertaModel'),
]
)
_lowerCamelCase : Optional[Any] = OrderedDict(
[
# Model for pre-training mapping
('albert', 'FlaxAlbertForPreTraining'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForPreTraining'),
('big_bird', 'FlaxBigBirdForPreTraining'),
('electra', 'FlaxElectraForPreTraining'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('t5', 'FlaxT5ForConditionalGeneration'),
('wav2vec2', 'FlaxWav2Vec2ForPreTraining'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_lowerCamelCase : int = OrderedDict(
[
# Model for Masked LM mapping
('albert', 'FlaxAlbertForMaskedLM'),
('bart', 'FlaxBartForConditionalGeneration'),
('bert', 'FlaxBertForMaskedLM'),
('big_bird', 'FlaxBigBirdForMaskedLM'),
('distilbert', 'FlaxDistilBertForMaskedLM'),
('electra', 'FlaxElectraForMaskedLM'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('roberta', 'FlaxRobertaForMaskedLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMaskedLM'),
('roformer', 'FlaxRoFormerForMaskedLM'),
('xlm-roberta', 'FlaxXLMRobertaForMaskedLM'),
]
)
_lowerCamelCase : List[str] = OrderedDict(
[
# Model for Seq2Seq Causal LM mapping
('bart', 'FlaxBartForConditionalGeneration'),
('blenderbot', 'FlaxBlenderbotForConditionalGeneration'),
('blenderbot-small', 'FlaxBlenderbotSmallForConditionalGeneration'),
('encoder-decoder', 'FlaxEncoderDecoderModel'),
('longt5', 'FlaxLongT5ForConditionalGeneration'),
('marian', 'FlaxMarianMTModel'),
('mbart', 'FlaxMBartForConditionalGeneration'),
('mt5', 'FlaxMT5ForConditionalGeneration'),
('pegasus', 'FlaxPegasusForConditionalGeneration'),
('t5', 'FlaxT5ForConditionalGeneration'),
]
)
_lowerCamelCase : int = OrderedDict(
[
# Model for Image-classsification
('beit', 'FlaxBeitForImageClassification'),
('regnet', 'FlaxRegNetForImageClassification'),
('resnet', 'FlaxResNetForImageClassification'),
('vit', 'FlaxViTForImageClassification'),
]
)
_lowerCamelCase : int = OrderedDict(
[
('vision-encoder-decoder', 'FlaxVisionEncoderDecoderModel'),
]
)
_lowerCamelCase : Optional[int] = OrderedDict(
[
# Model for Causal LM mapping
('bart', 'FlaxBartForCausalLM'),
('bert', 'FlaxBertForCausalLM'),
('big_bird', 'FlaxBigBirdForCausalLM'),
('electra', 'FlaxElectraForCausalLM'),
('gpt-sw3', 'FlaxGPT2LMHeadModel'),
('gpt2', 'FlaxGPT2LMHeadModel'),
('gpt_neo', 'FlaxGPTNeoForCausalLM'),
('gptj', 'FlaxGPTJForCausalLM'),
('opt', 'FlaxOPTForCausalLM'),
('roberta', 'FlaxRobertaForCausalLM'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForCausalLM'),
('xglm', 'FlaxXGLMForCausalLM'),
('xlm-roberta', 'FlaxXLMRobertaForCausalLM'),
]
)
_lowerCamelCase : Optional[Any] = OrderedDict(
[
# Model for Sequence Classification mapping
('albert', 'FlaxAlbertForSequenceClassification'),
('bart', 'FlaxBartForSequenceClassification'),
('bert', 'FlaxBertForSequenceClassification'),
('big_bird', 'FlaxBigBirdForSequenceClassification'),
('distilbert', 'FlaxDistilBertForSequenceClassification'),
('electra', 'FlaxElectraForSequenceClassification'),
('mbart', 'FlaxMBartForSequenceClassification'),
('roberta', 'FlaxRobertaForSequenceClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForSequenceClassification'),
('roformer', 'FlaxRoFormerForSequenceClassification'),
('xlm-roberta', 'FlaxXLMRobertaForSequenceClassification'),
]
)
_lowerCamelCase : Any = OrderedDict(
[
# Model for Question Answering mapping
('albert', 'FlaxAlbertForQuestionAnswering'),
('bart', 'FlaxBartForQuestionAnswering'),
('bert', 'FlaxBertForQuestionAnswering'),
('big_bird', 'FlaxBigBirdForQuestionAnswering'),
('distilbert', 'FlaxDistilBertForQuestionAnswering'),
('electra', 'FlaxElectraForQuestionAnswering'),
('mbart', 'FlaxMBartForQuestionAnswering'),
('roberta', 'FlaxRobertaForQuestionAnswering'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForQuestionAnswering'),
('roformer', 'FlaxRoFormerForQuestionAnswering'),
('xlm-roberta', 'FlaxXLMRobertaForQuestionAnswering'),
]
)
_lowerCamelCase : Union[str, Any] = OrderedDict(
[
# Model for Token Classification mapping
('albert', 'FlaxAlbertForTokenClassification'),
('bert', 'FlaxBertForTokenClassification'),
('big_bird', 'FlaxBigBirdForTokenClassification'),
('distilbert', 'FlaxDistilBertForTokenClassification'),
('electra', 'FlaxElectraForTokenClassification'),
('roberta', 'FlaxRobertaForTokenClassification'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForTokenClassification'),
('roformer', 'FlaxRoFormerForTokenClassification'),
('xlm-roberta', 'FlaxXLMRobertaForTokenClassification'),
]
)
_lowerCamelCase : Dict = OrderedDict(
[
# Model for Multiple Choice mapping
('albert', 'FlaxAlbertForMultipleChoice'),
('bert', 'FlaxBertForMultipleChoice'),
('big_bird', 'FlaxBigBirdForMultipleChoice'),
('distilbert', 'FlaxDistilBertForMultipleChoice'),
('electra', 'FlaxElectraForMultipleChoice'),
('roberta', 'FlaxRobertaForMultipleChoice'),
('roberta-prelayernorm', 'FlaxRobertaPreLayerNormForMultipleChoice'),
('roformer', 'FlaxRoFormerForMultipleChoice'),
('xlm-roberta', 'FlaxXLMRobertaForMultipleChoice'),
]
)
_lowerCamelCase : int = OrderedDict(
[
('bert', 'FlaxBertForNextSentencePrediction'),
]
)
_lowerCamelCase : Union[str, Any] = OrderedDict(
[
('speech-encoder-decoder', 'FlaxSpeechEncoderDecoderModel'),
('whisper', 'FlaxWhisperForConditionalGeneration'),
]
)
_lowerCamelCase : Any = OrderedDict(
[
('whisper', 'FlaxWhisperForAudioClassification'),
]
)
_lowerCamelCase : List[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_MAPPING_NAMES)
_lowerCamelCase : Dict = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_PRETRAINING_MAPPING_NAMES)
_lowerCamelCase : Tuple = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MASKED_LM_MAPPING_NAMES)
_lowerCamelCase : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
)
_lowerCamelCase : Union[str, Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : Optional[Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING_NAMES)
_lowerCamelCase : Optional[int] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES)
_lowerCamelCase : int = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : List[str] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES
)
_lowerCamelCase : Tuple = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES
)
_lowerCamelCase : List[Any] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES
)
_lowerCamelCase : str = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING_NAMES
)
_lowerCamelCase : Any = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING_NAMES
)
_lowerCamelCase : Optional[int] = _LazyAutoMapping(
CONFIG_MAPPING_NAMES, FLAX_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_MAPPING
_lowerCamelCase : Optional[Any] = auto_class_update(FlaxAutoModel)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_PRETRAINING_MAPPING
_lowerCamelCase : List[str] = auto_class_update(FlaxAutoModelForPreTraining, head_doc='pretraining')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_CAUSAL_LM_MAPPING
_lowerCamelCase : List[Any] = auto_class_update(FlaxAutoModelForCausalLM, head_doc='causal language modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_MASKED_LM_MAPPING
_lowerCamelCase : List[str] = auto_class_update(FlaxAutoModelForMaskedLM, head_doc='masked language modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_lowerCamelCase : Tuple = auto_class_update(
FlaxAutoModelForSeqaSeqLM, head_doc='sequence-to-sequence language modeling', checkpoint_for_example='t5-base'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
_lowerCamelCase : Tuple = auto_class_update(
FlaxAutoModelForSequenceClassification, head_doc='sequence classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_QUESTION_ANSWERING_MAPPING
_lowerCamelCase : Any = auto_class_update(FlaxAutoModelForQuestionAnswering, head_doc='question answering')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
_lowerCamelCase : str = auto_class_update(
FlaxAutoModelForTokenClassification, head_doc='token classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_MULTIPLE_CHOICE_MAPPING
_lowerCamelCase : Tuple = auto_class_update(FlaxAutoModelForMultipleChoice, head_doc='multiple choice')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING
_lowerCamelCase : List[Any] = auto_class_update(
FlaxAutoModelForNextSentencePrediction, head_doc='next sentence prediction'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_lowerCamelCase : Union[str, Any] = auto_class_update(
FlaxAutoModelForImageClassification, head_doc='image classification'
)
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_VISION_2_SEQ_MAPPING
_lowerCamelCase : Optional[int] = auto_class_update(FlaxAutoModelForVisionaSeq, head_doc='vision-to-text modeling')
class __UpperCAmelCase ( _BaseAutoModelClass ):
'''simple docstring'''
__lowerCAmelCase = FLAX_MODEL_FOR_SPEECH_SEQ_2_SEQ_MAPPING
_lowerCamelCase : Optional[int] = auto_class_update(
FlaxAutoModelForSpeechSeqaSeq, head_doc='sequence-to-sequence speech-to-text modeling'
)
| 337 | 1 |
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( _SCREAMING_SNAKE_CASE = 100_0000 ):
_snake_case = set(range(3 , lowercase__ , 2 ) )
primes.add(2 )
for p in range(3 , lowercase__ , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , lowercase__ , lowercase__ ) ) )
_snake_case = [float(lowercase__ ) for n in range(limit + 1 )]
for p in primes:
for n in range(lowercase__ , limit + 1 , lowercase__ ):
phi[n] *= 1 - 1 / p
return int(sum(phi[2:] ) )
if __name__ == "__main__":
print(f'''{solution() = }''') | 341 |
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
__lowerCAmelCase : Optional[int] ='\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
__lowerCAmelCase : Optional[Any] ='\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
__lowerCAmelCase : Dict ='\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def _UpperCamelCase ( lowercase__ , lowercase__ ):
return float((preds == labels).mean() )
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[Any] = simple_accuracy(lowercase__ , lowercase__ )
__SCREAMING_SNAKE_CASE : List[str] = float(fa_score(y_true=lowercase__ , y_pred=lowercase__ ) )
return {
"accuracy": acc,
"f1": fa,
}
def _UpperCamelCase ( lowercase__ , lowercase__ ):
__SCREAMING_SNAKE_CASE : Optional[int] = np.array(lowercase__ )
__SCREAMING_SNAKE_CASE : str = np.array(lowercase__ )
__SCREAMING_SNAKE_CASE : str = en_sentvecs.shape[0]
# mean centering
__SCREAMING_SNAKE_CASE : Tuple = en_sentvecs - np.mean(lowercase__ , axis=0 )
__SCREAMING_SNAKE_CASE : Optional[int] = in_sentvecs - np.mean(lowercase__ , axis=0 )
__SCREAMING_SNAKE_CASE : str = cdist(lowercase__ , lowercase__ , '''cosine''' )
__SCREAMING_SNAKE_CASE : int = np.array(range(lowercase__ ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = sim.argsort(axis=1 )[:, :10]
__SCREAMING_SNAKE_CASE : str = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowercase ( datasets.Metric ):
'''simple docstring'''
def __magic_name__( self :Tuple ) -> Tuple:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
'''references''': datasets.Value('''int64''' )
if self.config_name != '''cvit-mkb-clsr'''
else datasets.Sequence(datasets.Value('''float32''' ) ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' if self.config_name != '''cvit-mkb-clsr''' else None , )
def __magic_name__( self :List[str] , lowerCAmelCase__ :Optional[Any] , lowerCAmelCase__ :Tuple ) -> str:
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(lowerCAmelCase__ , lowerCAmelCase__ )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(lowerCAmelCase__ , lowerCAmelCase__ )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(lowerCAmelCase__ , lowerCAmelCase__ )}
else:
raise KeyError(
'''You should supply a configuration name selected in '''
'''["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '''
'''"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '''
'''"wiki-ner"]''' )
| 9 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json""",
"""google/bigbird-roberta-large""": """https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json""",
"""google/bigbird-base-trivia-itc""": """https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json""",
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class A__ ( snake_case_ ):
lowercase = 'big_bird'
def __init__( self : Any , a : Dict=50_358 , a : Tuple=768 , a : Any=12 , a : Tuple=12 , a : List[Any]=3_072 , a : Any="gelu_new" , a : Tuple=0.1 , a : Tuple=0.1 , a : Optional[int]=4_096 , a : Union[str, Any]=2 , a : Optional[Any]=0.0_2 , a : List[Any]=1E-12 , a : Dict=True , a : List[Any]=0 , a : List[str]=1 , a : Optional[Any]=2 , a : Dict=66 , a : Tuple="block_sparse" , a : Dict=True , a : Union[str, Any]=False , a : List[str]=64 , a : Optional[int]=3 , a : Optional[Any]=None , **a : int , ):
'''simple docstring'''
super().__init__(
pad_token_id=a , bos_token_id=a , eos_token_id=a , sep_token_id=a , **a , )
lowerCAmelCase__ : Dict = vocab_size
lowerCAmelCase__ : List[str] = max_position_embeddings
lowerCAmelCase__ : str = hidden_size
lowerCAmelCase__ : Optional[Any] = num_hidden_layers
lowerCAmelCase__ : Dict = num_attention_heads
lowerCAmelCase__ : Optional[int] = intermediate_size
lowerCAmelCase__ : Union[str, Any] = hidden_act
lowerCAmelCase__ : Dict = hidden_dropout_prob
lowerCAmelCase__ : List[str] = attention_probs_dropout_prob
lowerCAmelCase__ : List[Any] = initializer_range
lowerCAmelCase__ : Dict = type_vocab_size
lowerCAmelCase__ : Optional[Any] = layer_norm_eps
lowerCAmelCase__ : List[Any] = use_cache
lowerCAmelCase__ : int = rescale_embeddings
lowerCAmelCase__ : List[Any] = attention_type
lowerCAmelCase__ : Any = use_bias
lowerCAmelCase__ : Union[str, Any] = block_size
lowerCAmelCase__ : Optional[int] = num_random_blocks
lowerCAmelCase__ : Optional[int] = classifier_dropout
class A__ ( snake_case_ ):
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
lowerCAmelCase__ : List[Any] = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
lowerCAmelCase__ : List[Any] = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] )
| 353 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {
"""configuration_blip""": [
"""BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BlipConfig""",
"""BlipTextConfig""",
"""BlipVisionConfig""",
],
"""processing_blip""": ["""BlipProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BlipImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BlipModel""",
"""BlipPreTrainedModel""",
"""BlipForConditionalGeneration""",
"""BlipForQuestionAnswering""",
"""BlipVisionModel""",
"""BlipTextModel""",
"""BlipForImageTextRetrieval""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBlipModel""",
"""TFBlipPreTrainedModel""",
"""TFBlipForConditionalGeneration""",
"""TFBlipForQuestionAnswering""",
"""TFBlipVisionModel""",
"""TFBlipTextModel""",
"""TFBlipForImageTextRetrieval""",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 307 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_layoutlmva import LayoutLMvaImageProcessor
lowerCAmelCase : Union[str, Any] = logging.get_logger(__name__)
class __magic_name__ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self , *_a , **_a ):
"""simple docstring"""
warnings.warn(
"""The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use LayoutLMv2ImageProcessor instead.""" , _a , )
super().__init__(*_a , **_a )
| 291 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __magic_name__ ( unittest.TestCase ):
'''simple docstring'''
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = tempfile.mkdtemp()
# fmt: off
lowerCamelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
lowerCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
lowerCamelCase = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
lowerCamelCase = os.path.join(self.tmpdirname , _a )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(_a , _a )
def _lowerCAmelCase ( self , **_a ):
"""simple docstring"""
return BertTokenizer.from_pretrained(self.tmpdirname , **_a )
def _lowerCAmelCase ( self , **_a ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
lowerCamelCase = [Image.fromarray(np.moveaxis(_a , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = self.get_image_processor()
lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
lowerCamelCase = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
lowerCamelCase = self.get_image_processor(do_normalize=_a , padding_value=1.0 )
lowerCamelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=_a , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.get_image_processor()
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
lowerCamelCase = self.prepare_image_inputs()
lowerCamelCase = image_processor(_a , return_tensors="""np""" )
lowerCamelCase = processor(images=_a , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.get_image_processor()
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
lowerCamelCase = """lower newer"""
lowerCamelCase = processor(text=_a )
lowerCamelCase = tokenizer(_a )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.get_image_processor()
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
lowerCamelCase = """lower newer"""
lowerCamelCase = self.prepare_image_inputs()
lowerCamelCase = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(_a ):
processor()
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.get_image_processor()
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
lowerCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
lowerCamelCase = processor.batch_decode(_a )
lowerCamelCase = tokenizer.batch_decode(_a )
self.assertListEqual(_a , _a )
def _lowerCAmelCase ( self ):
"""simple docstring"""
lowerCamelCase = self.get_image_processor()
lowerCamelCase = self.get_tokenizer()
lowerCamelCase = VisionTextDualEncoderProcessor(tokenizer=_a , image_processor=_a )
lowerCamelCase = """lower newer"""
lowerCamelCase = self.prepare_image_inputs()
lowerCamelCase = processor(text=_a , images=_a )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 291 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowercase : Any = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase : List[str] = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_lowercase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 362 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class UpperCamelCase__( lowerCAmelCase , unittest.TestCase ):
__magic_name__ : Any = ShapEPipeline
__magic_name__ : Tuple = ["prompt"]
__magic_name__ : Optional[int] = ["prompt"]
__magic_name__ : Dict = [
"num_images_per_prompt",
"num_inference_steps",
"generator",
"latents",
"guidance_scale",
"frame_size",
"output_type",
"return_dict",
]
__magic_name__ : Optional[int] = False
@property
def a__( self : Optional[Any] )-> Dict:
"""simple docstring"""
return 32
@property
def a__( self : Dict )-> Dict:
"""simple docstring"""
return 32
@property
def a__( self : Optional[Any] )-> List[str]:
"""simple docstring"""
return self.time_input_dim * 4
@property
def a__( self : List[str] )-> str:
"""simple docstring"""
return 8
@property
def a__( self : int )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
return tokenizer
@property
def a__( self : Tuple )-> Optional[int]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(lowerCAmelCase )
@property
def a__( self : str )-> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = {
'''num_attention_heads''': 2,
'''attention_head_dim''': 16,
'''embedding_dim''': self.time_input_dim,
'''num_embeddings''': 32,
'''embedding_proj_dim''': self.text_embedder_hidden_size,
'''time_embed_dim''': self.time_embed_dim,
'''num_layers''': 1,
'''clip_embed_dim''': self.time_input_dim * 2,
'''additional_embeddings''': 0,
'''time_embed_act_fn''': '''gelu''',
'''norm_in_type''': '''layer''',
'''encoder_hid_proj_type''': None,
'''added_emb_type''': None,
}
UpperCAmelCase = PriorTransformer(**lowerCAmelCase )
return model
@property
def a__( self : List[Any] )-> List[Any]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = {
'''param_shapes''': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'''d_latent''': self.time_input_dim,
'''d_hidden''': self.renderer_dim,
'''n_output''': 12,
'''background''': (
0.1,
0.1,
0.1,
),
}
UpperCAmelCase = ShapERenderer(**lowerCAmelCase )
return model
def a__( self : Any )-> Tuple:
"""simple docstring"""
UpperCAmelCase = self.dummy_prior
UpperCAmelCase = self.dummy_text_encoder
UpperCAmelCase = self.dummy_tokenizer
UpperCAmelCase = self.dummy_renderer
UpperCAmelCase = HeunDiscreteScheduler(
beta_schedule='''exp''' , num_train_timesteps=1024 , prediction_type='''sample''' , use_karras_sigmas=lowerCAmelCase , clip_sample=lowerCAmelCase , clip_sample_range=1.0 , )
UpperCAmelCase = {
'''prior''': prior,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''renderer''': renderer,
'''scheduler''': scheduler,
}
return components
def a__( self : Tuple , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : List[Any]=0 )-> Optional[Any]:
"""simple docstring"""
if str(lowerCAmelCase ).startswith('''mps''' ):
UpperCAmelCase = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase = {
'''prompt''': '''horse''',
'''generator''': generator,
'''num_inference_steps''': 1,
'''frame_size''': 32,
'''output_type''': '''np''',
}
return inputs
def a__( self : int )-> Dict:
"""simple docstring"""
UpperCAmelCase = '''cpu'''
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**lowerCAmelCase )
UpperCAmelCase = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = pipe(**self.get_dummy_inputs(lowerCAmelCase ) )
UpperCAmelCase = output.images[0]
UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
UpperCAmelCase = np.array(
[
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
0.00039216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def a__( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def a__( self : Optional[int] )-> str:
"""simple docstring"""
UpperCAmelCase = torch_device == '''cpu'''
UpperCAmelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=lowerCAmelCase , relax_max_difference=lowerCAmelCase , )
def a__( self : int )-> List[str]:
"""simple docstring"""
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = self.pipeline_class(**lowerCAmelCase )
UpperCAmelCase = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = 1
UpperCAmelCase = 2
UpperCAmelCase = self.get_dummy_inputs(lowerCAmelCase )
for key in inputs.keys():
if key in self.batch_params:
UpperCAmelCase = batch_size * [inputs[key]]
UpperCAmelCase = pipe(**lowerCAmelCase , num_images_per_prompt=lowerCAmelCase )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Dict )-> Union[str, Any]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__( self : Optional[Any] )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/shap_e/test_shap_e_np_out.npy''' )
UpperCAmelCase = ShapEPipeline.from_pretrained('''openai/shap-e''' )
UpperCAmelCase = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase = torch.Generator(device=lowerCAmelCase ).manual_seed(0 )
UpperCAmelCase = pipe(
'''a shark''' , generator=lowerCAmelCase , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type='''np''' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase )
| 91 | 0 |
from pathlib import Path
import numpy as np
from PIL import Image
def lowerCAmelCase__ ( lowerCamelCase_ : np.ndarray):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
return 0.2989 * r + 0.5870 * g + 0.1140 * b
def lowerCAmelCase__ ( lowerCamelCase_ : np.ndarray):
'''simple docstring'''
return (gray > 127) & (gray <= 255)
def lowerCAmelCase__ ( lowerCamelCase_ : np.ndarray ,lowerCamelCase_ : np.ndarray):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = np.zeros_like(lowerCamelCase_)
lowerCAmelCase__ : str = np.zeros(
(image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1))
# Copy image to padded image
lowerCAmelCase__ : int = image
# Iterate over image & apply kernel
for x in range(image.shape[1]):
for y in range(image.shape[0]):
lowerCAmelCase__ : Tuple = (
kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
).sum()
lowerCAmelCase__ : Union[str, Any] = int(summation > 0)
return output
if __name__ == "__main__":
# read original image
__snake_case : str =Path(__file__).resolve().parent / 'image_data' / 'lena.jpg'
__snake_case : List[str] =np.array(Image.open(lena_path))
# kernel to be applied
__snake_case : Union[str, Any] =np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
__snake_case : Tuple =dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
# Save the output image
__snake_case : str =Image.fromarray(output).convert('RGB')
pil_img.save('result_dilation.png')
| 129 |
def lowerCAmelCase__ ( lowerCamelCase_ : list[list[float]]):
'''simple docstring'''
lowerCAmelCase__ : list[list[float]] = []
for data in source_data:
for i, el in enumerate(lowerCamelCase_):
if len(lowerCamelCase_) < i + 1:
data_lists.append([])
data_lists[i].append(float(lowerCamelCase_))
return data_lists
def lowerCAmelCase__ ( lowerCamelCase_ : list[list[float]] ,lowerCamelCase_ : list[int]):
'''simple docstring'''
lowerCAmelCase__ : list[list[float]] = []
for dlist, weight in zip(lowerCamelCase_ ,lowerCamelCase_):
lowerCAmelCase__ : str = min(lowerCamelCase_)
lowerCAmelCase__ : Optional[int] = max(lowerCamelCase_)
lowerCAmelCase__ : list[float] = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)))
except ZeroDivisionError:
score.append(1)
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind))
except ZeroDivisionError:
score.append(0)
# weight not 0 or 1
else:
lowerCAmelCase__ : Optional[int] = f"""Invalid weight of {weight:f} provided"""
raise ValueError(lowerCamelCase_)
score_lists.append(lowerCamelCase_)
return score_lists
def lowerCAmelCase__ ( lowerCamelCase_ : list[list[float]]):
'''simple docstring'''
lowerCAmelCase__ : list[float] = [0 for i in range(len(score_lists[0]))]
for slist in score_lists:
for j, ele in enumerate(lowerCamelCase_):
lowerCAmelCase__ : str = final_scores[j] + ele
return final_scores
def lowerCAmelCase__ ( lowerCamelCase_ : list[list[float]] ,lowerCamelCase_ : list[int]):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = get_data(lowerCamelCase_)
lowerCAmelCase__ : Dict = calculate_each_score(lowerCamelCase_ ,lowerCamelCase_)
lowerCAmelCase__ : Union[str, Any] = generate_final_scores(lowerCamelCase_)
# append scores to source data
for i, ele in enumerate(lowerCamelCase_):
source_data[i].append(lowerCamelCase_)
return source_data
| 129 | 1 |
from numpy import exp, pi, sqrt
def lowerCAmelCase__ ( a__: Any , a__: float = 0.0 , a__: float = 1.0 ) -> int:
'''simple docstring'''
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 185 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase__ :Tuple = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :List[Any] = ['''BeitFeatureExtractor''']
lowerCAmelCase__ :Optional[Any] = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :int = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ :Tuple = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ :Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 185 | 1 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
'nvidia/segformer-b0-finetuned-ade-512-512': (
'https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512/resolve/main/config.json'
),
# See all SegFormer models at https://huggingface.co/models?filter=segformer
}
class lowerCAmelCase__ ( UpperCamelCase__ ):
__a = '''segformer'''
def __init__( self : Any , _lowerCamelCase : Dict=3 , _lowerCamelCase : Optional[int]=4 , _lowerCamelCase : Any=[2, 2, 2, 2] , _lowerCamelCase : Tuple=[8, 4, 2, 1] , _lowerCamelCase : Any=[32, 64, 160, 256] , _lowerCamelCase : str=[7, 3, 3, 3] , _lowerCamelCase : List[Any]=[4, 2, 2, 2] , _lowerCamelCase : List[Any]=[1, 2, 5, 8] , _lowerCamelCase : int=[4, 4, 4, 4] , _lowerCamelCase : List[str]="gelu" , _lowerCamelCase : Optional[int]=0.0 , _lowerCamelCase : Optional[Any]=0.0 , _lowerCamelCase : Optional[int]=0.1 , _lowerCamelCase : List[str]=0.0_2 , _lowerCamelCase : Optional[Any]=0.1 , _lowerCamelCase : List[Any]=1e-6 , _lowerCamelCase : Optional[int]=256 , _lowerCamelCase : Dict=255 , **_lowerCamelCase : List[Any] , ):
super().__init__(**UpperCamelCase_ )
if "reshape_last_stage" in kwargs and kwargs["reshape_last_stage"] is False:
warnings.warn(
'''Reshape_last_stage is set to False in this config. This argument is deprecated and will soon be'''
''' removed, as the behaviour will default to that of reshape_last_stage = True.''' , UpperCamelCase_ , )
_snake_case = num_channels
_snake_case = num_encoder_blocks
_snake_case = depths
_snake_case = sr_ratios
_snake_case = hidden_sizes
_snake_case = patch_sizes
_snake_case = strides
_snake_case = mlp_ratios
_snake_case = num_attention_heads
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = classifier_dropout_prob
_snake_case = initializer_range
_snake_case = drop_path_rate
_snake_case = layer_norm_eps
_snake_case = decoder_hidden_size
_snake_case = kwargs.get('''reshape_last_stage''' , UpperCamelCase_ )
_snake_case = semantic_loss_ignore_index
class lowerCAmelCase__ ( UpperCamelCase__ ):
__a = version.parse("""1.11""" )
@property
def lowercase ( self : Optional[int] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowercase ( self : Tuple ):
return 1e-4
@property
def lowercase ( self : List[Any] ):
return 12
| 288 |
import socket
def _a ( ):
"""simple docstring"""
lowercase__ = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
lowercase__ = socket.gethostname()
lowercase__ = 1_23_12
sock.connect((host, port) )
sock.send(B'''Hello server!''' )
with open('''Received_file''' , '''wb''' ) as out_file:
print('''File opened''' )
print('''Receiving data...''' )
while True:
lowercase__ = sock.recv(10_24 )
if not data:
break
out_file.write(SCREAMING_SNAKE_CASE )
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
| 110 | 0 |
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
if curr_ind == len(UpperCamelCase__ ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(UpperCamelCase__ ) ):
if valid_connection(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
# Insert current vertex into path as next transition
snake_case_ = next_ver
# Validate created path
if util_hamilton_cycle(UpperCamelCase__ , UpperCamelCase__ , curr_ind + 1 ):
return True
# Backtrack
snake_case_ = -1
return False
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ = 0 ):
'''simple docstring'''
snake_case_ = [-1] * (len(UpperCamelCase__ ) + 1)
# initialize start and end of path with starting index
snake_case_ = snake_case_ = start_index
# evaluate and if we find answer return path either return empty array
return path if util_hamilton_cycle(UpperCamelCase__ , UpperCamelCase__ , 1 ) else []
| 363 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCAmelCase : Tuple = {
"""configuration_xlm_roberta_xl""": [
"""XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XLMRobertaXLConfig""",
"""XLMRobertaXLOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : int = [
"""XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLMRobertaXLForCausalLM""",
"""XLMRobertaXLForMaskedLM""",
"""XLMRobertaXLForMultipleChoice""",
"""XLMRobertaXLForQuestionAnswering""",
"""XLMRobertaXLForSequenceClassification""",
"""XLMRobertaXLForTokenClassification""",
"""XLMRobertaXLModel""",
"""XLMRobertaXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaXLConfig,
XLMRobertaXLOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta_xl import (
XLM_ROBERTA_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaXLForCausalLM,
XLMRobertaXLForMaskedLM,
XLMRobertaXLForMultipleChoice,
XLMRobertaXLForQuestionAnswering,
XLMRobertaXLForSequenceClassification,
XLMRobertaXLForTokenClassification,
XLMRobertaXLModel,
XLMRobertaXLPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
| 200 | 0 |
"""simple docstring"""
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class A_ :
'''simple docstring'''
def __init__( self , lowercase_ , lowercase_=13 , lowercase_=30 , lowercase_=2 , lowercase_=3 , lowercase_=True , lowercase_=True , lowercase_=32 , lowercase_=5 , lowercase_=4 , lowercase_=37 , lowercase_="gelu" , lowercase_=0.1 , lowercase_=0.1 , lowercase_=10 , lowercase_=0.02 , lowercase_=3 , lowercase_=None , lowercase_=2 , ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = parent
UpperCAmelCase_ : int = batch_size
UpperCAmelCase_ : int = image_size
UpperCAmelCase_ : List[Any] = patch_size
UpperCAmelCase_ : Any = num_channels
UpperCAmelCase_ : Optional[int] = is_training
UpperCAmelCase_ : Union[str, Any] = use_labels
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : str = num_hidden_layers
UpperCAmelCase_ : List[str] = num_attention_heads
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : List[Any] = hidden_dropout_prob
UpperCAmelCase_ : Union[str, Any] = attention_probs_dropout_prob
UpperCAmelCase_ : str = type_sequence_label_size
UpperCAmelCase_ : str = initializer_range
UpperCAmelCase_ : Union[str, Any] = scope
UpperCAmelCase_ : str = encoder_stride
# in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
UpperCAmelCase_ : int = (image_size // patch_size) ** 2
UpperCAmelCase_ : Optional[Any] = num_patches + 2
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ : Tuple = None
if self.use_labels:
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self ):
"""simple docstring"""
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowercase_ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = DeiTModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = DeiTForMaskedImageModeling(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : List[Any] = model(lowercase_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ : List[str] = 1
UpperCAmelCase_ : Optional[Any] = DeiTForMaskedImageModeling(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : Optional[int] = model(lowercase_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_ ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = self.type_sequence_label_size
UpperCAmelCase_ : Union[str, Any] = DeiTForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : List[str] = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ : Union[str, Any] = 1
UpperCAmelCase_ : Optional[int] = DeiTForImageClassification(lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCAmelCase_ : Dict = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ : List[Any] = model(lowercase_ , labels=lowercase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = self.prepare_config_and_inputs()
(
(
UpperCAmelCase_
) , (
UpperCAmelCase_
) , (
UpperCAmelCase_
) ,
) : Dict = config_and_inputs
UpperCAmelCase_ : Optional[int] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A_ (lowercase__ ,lowercase__ ,unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : Tuple = (
{
"""feature-extraction""": DeiTModel,
"""image-classification""": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : Optional[Any] = False
SCREAMING_SNAKE_CASE__ : List[str] = False
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Any = DeiTModelTester(self )
UpperCAmelCase_ : Optional[int] = ConfigTester(self , config_class=lowercase_ , has_text_modality=lowercase_ , hidden_size=37 )
def UpperCamelCase__ ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def UpperCamelCase__ ( self ):
"""simple docstring"""
pass
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : List[Any] = model_class(lowercase_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowercase_ , nn.Linear ) )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ : Dict = model_class(lowercase_ )
UpperCAmelCase_ : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ : str = [*signature.parameters.keys()]
UpperCAmelCase_ : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowercase_ )
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowercase_ )
def UpperCamelCase__ ( self , lowercase_ , lowercase_ , lowercase_=False ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = super()._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self ):
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCAmelCase_ , UpperCAmelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Union[str, Any] = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(lowercase_ )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCAmelCase_ : Optional[int] = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
UpperCAmelCase_ : List[Any] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
UpperCAmelCase_ : Dict = model(**lowercase_ ).loss
loss.backward()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : int = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : Optional[int] = True
for model_class in self.all_model_classes:
if model_class in get_values(lowercase_ ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCAmelCase_ : List[str] = model_class(lowercase_ )
model.gradient_checkpointing_enable()
model.to(lowercase_ )
model.train()
UpperCAmelCase_ : Optional[int] = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
UpperCAmelCase_ : Any = model(**lowercase_ ).loss
loss.backward()
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ : Dict = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(lowercase_ ),
*get_values(lowercase_ ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=F"""Testing {model_class} with {problem_type["title"]}""" ):
UpperCAmelCase_ : str = problem_type["title"]
UpperCAmelCase_ : List[Any] = problem_type["num_labels"]
UpperCAmelCase_ : Union[str, Any] = model_class(lowercase_ )
model.to(lowercase_ )
model.train()
UpperCAmelCase_ : int = self._prepare_for_class(lowercase_ , lowercase_ , return_labels=lowercase_ )
if problem_type["num_labels"] > 1:
UpperCAmelCase_ : List[Any] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
UpperCAmelCase_ : Tuple = inputs["labels"].to(problem_type["dtype"] )
# This tests that we do not trigger the warning form PyTorch "Using a target size that is different
# to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
# they have the same size." which is a symptom something in wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowercase_ ) as warning_list:
UpperCAmelCase_ : List[str] = model(**lowercase_ ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
F"""Something is going wrong in the regression problem: intercepted {w.message}""" )
loss.backward()
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ : Union[str, Any] = DeiTModel.from_pretrained(lowercase_ )
self.assertIsNotNone(lowercase_ )
def __a ( ):
UpperCAmelCase_ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class A_ (unittest.TestCase ):
'''simple docstring'''
@cached_property
def UpperCamelCase__ ( self ):
"""simple docstring"""
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : Tuple = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
lowercase_ )
UpperCAmelCase_ : List[str] = self.default_image_processor
UpperCAmelCase_ : List[str] = prepare_img()
UpperCAmelCase_ : int = image_processor(images=lowercase_ , return_tensors="pt" ).to(lowercase_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_ : Dict = model(**lowercase_ )
# verify the logits
UpperCAmelCase_ : List[str] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowercase_ )
UpperCAmelCase_ : str = torch.tensor([-1.02_66, 0.19_12, -1.28_61] ).to(lowercase_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowercase_ , atol=1E-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def UpperCamelCase__ ( self ):
"""simple docstring"""
UpperCAmelCase_ : List[str] = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
UpperCAmelCase_ : str = self.default_image_processor
UpperCAmelCase_ : Union[str, Any] = prepare_img()
UpperCAmelCase_ : List[Any] = image_processor(images=lowercase_ , return_tensors="pt" )
UpperCAmelCase_ : List[str] = inputs.pixel_values.to(lowercase_ )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase_ : int = model(lowercase_ )
| 61 |
"""simple docstring"""
import argparse
from collections import defaultdict
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
UpperCAmelCase_ : int = f"""{file}_{class_name}_{test_name}"""
done_test[_id] += 1
with open(__lowerCamelCase, "r" ) as f:
UpperCAmelCase_ : List[Any] = f.readlines()
UpperCAmelCase_ : int = f"""class {class_name}("""
UpperCAmelCase_ : Optional[Any] = f"""{4 * " "}def {test_name}("""
UpperCAmelCase_ : Optional[Any] = f"""{8 * " "}{correct_line.split()[0]}"""
UpperCAmelCase_ : Tuple = f"""{16 * " "}{correct_line.split()[0]}"""
UpperCAmelCase_ : int = False
UpperCAmelCase_ : Union[str, Any] = False
UpperCAmelCase_ : str = False
UpperCAmelCase_ : Optional[Any] = False
UpperCAmelCase_ : List[str] = 0
UpperCAmelCase_ : Optional[int] = 0
UpperCAmelCase_ : int = []
for line in lines:
if line.startswith(__lowerCamelCase ):
UpperCAmelCase_ : Tuple = True
elif in_class and line.startswith(__lowerCamelCase ):
UpperCAmelCase_ : Optional[int] = True
elif in_class and in_func and (line.startswith(__lowerCamelCase ) or line.startswith(__lowerCamelCase )):
UpperCAmelCase_ : Any = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCAmelCase_ : Union[str, Any] = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCAmelCase_ : Any = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f"""{spaces * " "}{correct_line}""" )
UpperCAmelCase_ : int = False
else:
new_lines.append(__lowerCamelCase )
with open(__lowerCamelCase, "w" ) as f:
for line in new_lines:
f.write(__lowerCamelCase )
def __a ( __lowerCamelCase, __lowerCamelCase=None ):
if fail is not None:
with open(__lowerCamelCase, "r" ) as f:
UpperCAmelCase_ : Tuple = {l.strip() for l in f.readlines()}
else:
UpperCAmelCase_ : str = None
with open(__lowerCamelCase, "r" ) as f:
UpperCAmelCase_ : Optional[int] = f.readlines()
UpperCAmelCase_ : Any = defaultdict(__lowerCamelCase )
for line in correct_lines:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ : Any = line.split(";" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument('--correct_filename', help='filename of tests with expected result')
parser.add_argument('--fail_filename', help='filename of test failures', type=str, default=None)
_a = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 61 | 1 |
import logging
import math
from functools import partial
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import torch
from .tensor_utils import tensor_tree_map, tree_map
def _lowerCAmelCase ( A__: Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase = []
if isinstance(_A , _A ):
for v in tree.values():
shapes.extend(_fetch_dims(_A ) )
elif isinstance(_A , (list, tuple) ):
for t in tree:
shapes.extend(_fetch_dims(_A ) )
elif isinstance(_A , torch.Tensor ):
shapes.append(tree.shape )
else:
raise ValueError('''Not supported''' )
return shapes
@torch.jit.ignore
def _lowerCAmelCase ( A__: Dict , A__: Tuple ):
'''simple docstring'''
UpperCAmelCase = []
for d in reversed(_A ):
idx.append(flat_idx % d )
UpperCAmelCase = flat_idx // d
return tuple(reversed(_A ) )
@torch.jit.ignore
def _lowerCAmelCase ( A__: int , A__: List[Any] , A__: str , A__: str = None , A__: str = None , ):
'''simple docstring'''
def reduce_edge_list(A__: Tuple ) -> None:
UpperCAmelCase = True
for i in range(len(_A ) ):
UpperCAmelCase = -1 * (i + 1)
l[reversed_idx] &= tally
UpperCAmelCase = l[reversed_idx]
if start_edges is None:
UpperCAmelCase = [s == 0 for s in start]
reduce_edge_list(_A )
if end_edges is None:
UpperCAmelCase = [e == (d - 1) for e, d in zip(_A , _A )]
reduce_edge_list(_A )
# Base cases. Either start/end are empty and we're done, or the final,
# one-dimensional tensor can be simply sliced
if len(_A ) == 0:
return [()]
elif len(_A ) == 1:
return [(slice(start[0] , end[0] + 1 ),)]
UpperCAmelCase = []
UpperCAmelCase = []
# Dimensions common to start and end can be selected directly
for s, e in zip(_A , _A ):
if s == e:
path_list.append(slice(_A , s + 1 ) )
else:
break
UpperCAmelCase = tuple(_A )
UpperCAmelCase = len(_A )
# start == end, and we're done
if divergence_idx == len(_A ):
return [path]
def upper() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase = start[divergence_idx]
return tuple(
path + (slice(_A , sdi + 1 ),) + s
for s in _get_minimal_slice_set(
start[divergence_idx + 1 :] , [d - 1 for d in dims[divergence_idx + 1 :]] , dims[divergence_idx + 1 :] , start_edges=start_edges[divergence_idx + 1 :] , end_edges=[True for _ in end_edges[divergence_idx + 1 :]] , ) )
def lower() -> Tuple[Tuple[slice, ...], ...]:
assert start_edges is not None
assert end_edges is not None
UpperCAmelCase = end[divergence_idx]
return tuple(
path + (slice(_A , edi + 1 ),) + s
for s in _get_minimal_slice_set(
[0 for _ in start[divergence_idx + 1 :]] , end[divergence_idx + 1 :] , dims[divergence_idx + 1 :] , start_edges=[True for _ in start_edges[divergence_idx + 1 :]] , end_edges=end_edges[divergence_idx + 1 :] , ) )
# If both start and end are at the edges of the subtree rooted at
# divergence_idx, we can just select the whole subtree at once
if start_edges[divergence_idx] and end_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] + 1 ),) )
# If just start is at the edge, we can grab almost all of the subtree,
# treating only the ragged bottom edge as an edge case
elif start_edges[divergence_idx]:
slices.append(path + (slice(start[divergence_idx] , end[divergence_idx] ),) )
slices.extend(lower() )
# Analogous to the previous case, but the top is ragged this time
elif end_edges[divergence_idx]:
slices.extend(upper() )
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] + 1 ),) )
# If both sides of the range are ragged, we need to handle both sides
# separately. If there's contiguous meat in between them, we can index it
# in one big chunk
else:
slices.extend(upper() )
UpperCAmelCase = end[divergence_idx] - start[divergence_idx]
if middle_ground > 1:
slices.append(path + (slice(start[divergence_idx] + 1 , end[divergence_idx] ),) )
slices.extend(lower() )
return slices
@torch.jit.ignore
def _lowerCAmelCase ( A__: Optional[int] , A__: int , A__: Dict , A__: Tuple ):
'''simple docstring'''
UpperCAmelCase = t.shape[:no_batch_dims]
UpperCAmelCase = list(_flat_idx_to_idx(_A , _A ) )
# _get_minimal_slice_set is inclusive
UpperCAmelCase = list(_flat_idx_to_idx(flat_end - 1 , _A ) )
# Get an ordered list of slices to perform
UpperCAmelCase = _get_minimal_slice_set(
_A , _A , _A , )
UpperCAmelCase = [t[s] for s in slices]
return torch.cat([s.view((-1,) + t.shape[no_batch_dims:] ) for s in sliced_tensors] )
def _lowerCAmelCase ( A__: int , A__: str , A__: Optional[Any] , A__: str , A__: Optional[int] = False , A__: Dict = None , A__: int = False , ):
'''simple docstring'''
if not (len(_A ) > 0):
raise ValueError('''Must provide at least one input''' )
UpperCAmelCase = [shape[:no_batch_dims] for shape in _fetch_dims(_A )]
UpperCAmelCase = tuple([max(_A ) for s in zip(*_A )] )
def _prep_inputs(A__: Optional[Any] ) -> torch.Tensor:
if not low_mem:
if not sum(t.shape[:no_batch_dims] ) == no_batch_dims:
UpperCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
UpperCAmelCase = t.reshape(-1 , *t.shape[no_batch_dims:] )
else:
UpperCAmelCase = t.expand(orig_batch_dims + t.shape[no_batch_dims:] )
return t
UpperCAmelCase = tensor_tree_map(_prep_inputs , _A )
UpperCAmelCase = None
if _out is not None:
UpperCAmelCase = tensor_tree_map(lambda A__ : t.view([-1] + list(t.shape[no_batch_dims:] ) ) , _out )
UpperCAmelCase = 1
for d in orig_batch_dims:
flat_batch_dim *= d
UpperCAmelCase = flat_batch_dim // chunk_size + (flat_batch_dim % chunk_size != 0)
def _select_chunk(A__: str ) -> torch.Tensor:
return t[i : i + chunk_size] if t.shape[0] != 1 else t
UpperCAmelCase = 0
UpperCAmelCase = prepped_outputs
for _ in range(_A ):
# Chunk the input
if not low_mem:
UpperCAmelCase = _select_chunk
else:
UpperCAmelCase = partial(
_chunk_slice , flat_start=_A , flat_end=min(_A , i + chunk_size ) , no_batch_dims=len(_A ) , )
UpperCAmelCase = tensor_tree_map(_A , _A )
# Run the layer on the chunk
UpperCAmelCase = layer(**_A )
# Allocate space for the output
if out is None:
UpperCAmelCase = tensor_tree_map(lambda A__ : t.new_zeros((flat_batch_dim,) + t.shape[1:] ) , _A )
# Put the chunk in its pre-allocated space
if isinstance(_A , _A ):
def assign(A__: Union[str, Any] , A__: Optional[int] ) -> None:
for k, v in da.items():
if isinstance(_A , _A ):
assign(_A , da[k] )
else:
if _add_into_out:
v[i : i + chunk_size] += da[k]
else:
UpperCAmelCase = da[k]
assign(_A , _A )
elif isinstance(_A , _A ):
for xa, xa in zip(_A , _A ):
if _add_into_out:
xa[i : i + chunk_size] += xa
else:
UpperCAmelCase = xa
elif isinstance(_A , torch.Tensor ):
if _add_into_out:
out[i : i + chunk_size] += output_chunk
else:
UpperCAmelCase = output_chunk
else:
raise ValueError('''Not supported''' )
i += chunk_size
UpperCAmelCase = tensor_tree_map(lambda A__ : t.view(orig_batch_dims + t.shape[1:] ) , _A )
return out
class lowercase :
'''simple docstring'''
def __init__( self , _snake_case = 512 , ) -> List[str]:
"""simple docstring"""
UpperCAmelCase = max_chunk_size
UpperCAmelCase = None
UpperCAmelCase = None
def snake_case_ ( self , _snake_case , _snake_case , _snake_case ) -> int:
"""simple docstring"""
logging.info('''Tuning chunk size...''' )
if min_chunk_size >= self.max_chunk_size:
return min_chunk_size
UpperCAmelCase = [2**l for l in range(int(math.log(self.max_chunk_size , 2 ) ) + 1 )]
UpperCAmelCase = [c for c in candidates if c > min_chunk_size]
UpperCAmelCase = [min_chunk_size] + candidates
candidates[-1] += 4
def test_chunk_size(_snake_case ) -> bool:
try:
with torch.no_grad():
fn(*snake_case__ , chunk_size=snake_case__ )
return True
except RuntimeError:
return False
UpperCAmelCase = 0
UpperCAmelCase = len(snake_case__ ) - 1
while i > min_viable_chunk_size_index:
UpperCAmelCase = test_chunk_size(candidates[i] )
if not viable:
UpperCAmelCase = (min_viable_chunk_size_index + i) // 2
else:
UpperCAmelCase = i
UpperCAmelCase = (i + len(snake_case__ ) - 1) // 2
return candidates[min_viable_chunk_size_index]
def snake_case_ ( self , _snake_case , _snake_case ) -> int:
"""simple docstring"""
UpperCAmelCase = True
for aa, aa in zip(snake_case__ , snake_case__ ):
assert type(snake_case__ ) == type(snake_case__ )
if isinstance(snake_case__ , (list, tuple) ):
consistent &= self._compare_arg_caches(snake_case__ , snake_case__ )
elif isinstance(snake_case__ , snake_case__ ):
UpperCAmelCase = [v for _, v in sorted(aa.items() , key=lambda _snake_case : x[0] )]
UpperCAmelCase = [v for _, v in sorted(aa.items() , key=lambda _snake_case : x[0] )]
consistent &= self._compare_arg_caches(snake_case__ , snake_case__ )
else:
consistent &= aa == aa
return consistent
def snake_case_ ( self , _snake_case , _snake_case , _snake_case , ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = True
UpperCAmelCase = tree_map(lambda _snake_case : a.shape if isinstance(snake_case__ , torch.Tensor ) else a , snake_case__ , snake_case__ )
if self.cached_arg_data is not None:
# If args have changed shape/value, we need to re-tune
assert len(self.cached_arg_data ) == len(snake_case__ )
UpperCAmelCase = self._compare_arg_caches(self.cached_arg_data , snake_case__ )
else:
# Otherwise, we can reuse the precomputed value
UpperCAmelCase = False
if not consistent:
UpperCAmelCase = self._determine_favorable_chunk_size(
snake_case__ , snake_case__ , snake_case__ , )
UpperCAmelCase = arg_data
assert self.cached_chunk_size is not None
return self.cached_chunk_size
| 353 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 152 | 0 |
from string import ascii_uppercase
__A : Dict = {str(ord(c) - 55): c for c in ascii_uppercase}
def __SCREAMING_SNAKE_CASE ( UpperCamelCase__ , UpperCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if isinstance(a__ , a__ ):
raise TypeError('''int() can\'t convert non-string with explicit base''' )
if num < 0:
raise ValueError('''parameter must be positive int''' )
if isinstance(a__ , a__ ):
raise TypeError('''\'str\' object cannot be interpreted as an integer''' )
if isinstance(a__ , a__ ):
raise TypeError('''\'float\' object cannot be interpreted as an integer''' )
if base in (0, 1):
raise ValueError('''base must be >= 2''' )
if base > 36:
raise ValueError('''base must be <= 36''' )
UpperCAmelCase = ''''''
UpperCAmelCase = 0
UpperCAmelCase = 0
while div != 1:
UpperCAmelCase , UpperCAmelCase = divmod(a__ , a__ )
if base >= 11 and 9 < mod < 36:
UpperCAmelCase = ALPHABET_VALUES[str(a__ )]
else:
UpperCAmelCase = str(a__ )
new_value += actual_value
UpperCAmelCase = num // base
UpperCAmelCase = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(a__ )
return str(new_value[::-1] )
return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1_000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 273 |
'''simple docstring'''
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class lowerCAmelCase__ ( a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = FlaxAutoencoderKL
@property
def UpperCAmelCase__ ( self : Tuple ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = (32, 32)
__SCREAMING_SNAKE_CASE = jax.random.PRNGKey(0 )
__SCREAMING_SNAKE_CASE = jax.random.uniform(__SCREAMING_SNAKE_CASE , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def UpperCAmelCase__ ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
__SCREAMING_SNAKE_CASE = self.dummy_input
return init_dict, inputs_dict
| 267 | 0 |
'''simple docstring'''
import datasets
__UpperCAmelCase = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
__UpperCAmelCase = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
__UpperCAmelCase = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def _snake_case ( A , A ) -> Any:
return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
'''simple docstring'''
def __SCREAMING_SNAKE_CASE ( self ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
'''references''': datasets.Value('''int64''' if self.config_name != '''sts-b''' else '''float32''' ),
} ) , codebase_urls=[] , reference_urls=[] , format='''numpy''' , )
def __SCREAMING_SNAKE_CASE ( self , lowerCamelCase_ , lowerCamelCase_ ) -> int:
return {"accuracy": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )} | 228 |
'''simple docstring'''
def _snake_case ( A = 10 ) -> str:
if not isinstance(A , A ) or n < 0:
raise ValueError('''Invalid input''' )
lowerCAmelCase__ = 10**n
lowerCAmelCase__ = 28433 * (pow(2 , 7830457 , A )) + 1
return str(number % modulus )
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(10) = }""") | 228 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
UpperCamelCase = {
'''configuration_groupvit''': [
'''GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''GroupViTConfig''',
'''GroupViTOnnxConfig''',
'''GroupViTTextConfig''',
'''GroupViTVisionConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GroupViTModel''',
'''GroupViTPreTrainedModel''',
'''GroupViTTextModel''',
'''GroupViTVisionModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = [
'''TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFGroupViTModel''',
'''TFGroupViTPreTrainedModel''',
'''TFGroupViTTextModel''',
'''TFGroupViTVisionModel''',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 87 |
'''simple docstring'''
UpperCAmelCase_ : Dict = [
999,
800,
799,
600,
599,
500,
400,
399,
377,
355,
333,
311,
288,
266,
244,
222,
200,
199,
177,
155,
133,
111,
88,
66,
44,
22,
0,
]
UpperCAmelCase_ : Any = [
999,
976,
952,
928,
905,
882,
858,
857,
810,
762,
715,
714,
572,
429,
428,
286,
285,
238,
190,
143,
142,
118,
95,
71,
47,
24,
0,
]
UpperCAmelCase_ : Tuple = [
999,
988,
977,
966,
955,
944,
933,
922,
911,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
350,
300,
299,
266,
233,
200,
199,
179,
159,
140,
120,
100,
99,
88,
77,
66,
55,
44,
33,
22,
11,
0,
]
UpperCAmelCase_ : Dict = [
999,
995,
992,
989,
985,
981,
978,
975,
971,
967,
964,
961,
957,
956,
951,
947,
942,
937,
933,
928,
923,
919,
914,
913,
908,
903,
897,
892,
887,
881,
876,
871,
870,
864,
858,
852,
846,
840,
834,
828,
827,
820,
813,
806,
799,
792,
785,
784,
777,
770,
763,
756,
749,
742,
741,
733,
724,
716,
707,
699,
698,
688,
677,
666,
656,
655,
645,
634,
623,
613,
612,
598,
584,
570,
569,
555,
541,
527,
526,
505,
484,
483,
462,
440,
439,
396,
395,
352,
351,
308,
307,
264,
263,
220,
219,
176,
132,
88,
44,
0,
]
UpperCAmelCase_ : Tuple = [
999,
997,
995,
992,
990,
988,
986,
984,
981,
979,
977,
975,
972,
970,
968,
966,
964,
961,
959,
957,
956,
954,
951,
949,
946,
944,
941,
939,
936,
934,
931,
929,
926,
924,
921,
919,
916,
914,
913,
910,
907,
905,
902,
899,
896,
893,
891,
888,
885,
882,
879,
877,
874,
871,
870,
867,
864,
861,
858,
855,
852,
849,
846,
843,
840,
837,
834,
831,
828,
827,
824,
821,
817,
814,
811,
808,
804,
801,
798,
795,
791,
788,
785,
784,
780,
777,
774,
770,
766,
763,
760,
756,
752,
749,
746,
742,
741,
737,
733,
730,
726,
722,
718,
714,
710,
707,
703,
699,
698,
694,
690,
685,
681,
677,
673,
669,
664,
660,
656,
655,
650,
646,
641,
636,
632,
627,
622,
618,
613,
612,
607,
602,
596,
591,
586,
580,
575,
570,
569,
563,
557,
551,
545,
539,
533,
527,
526,
519,
512,
505,
498,
491,
484,
483,
474,
466,
457,
449,
440,
439,
428,
418,
407,
396,
395,
381,
366,
352,
351,
330,
308,
307,
286,
264,
263,
242,
220,
219,
176,
175,
132,
131,
88,
44,
0,
]
UpperCAmelCase_ : Union[str, Any] = [
999,
991,
982,
974,
966,
958,
950,
941,
933,
925,
916,
908,
900,
899,
874,
850,
825,
800,
799,
700,
600,
500,
400,
300,
200,
100,
0,
]
UpperCAmelCase_ : Tuple = [
999,
992,
985,
978,
971,
964,
957,
949,
942,
935,
928,
921,
914,
907,
900,
899,
879,
859,
840,
820,
800,
799,
766,
733,
700,
699,
650,
600,
599,
500,
499,
400,
399,
300,
299,
200,
199,
100,
99,
0,
]
UpperCAmelCase_ : int = [
999,
996,
992,
989,
985,
982,
979,
975,
972,
968,
965,
961,
958,
955,
951,
948,
944,
941,
938,
934,
931,
927,
924,
920,
917,
914,
910,
907,
903,
900,
899,
891,
884,
876,
869,
861,
853,
846,
838,
830,
823,
815,
808,
800,
799,
788,
777,
766,
755,
744,
733,
722,
711,
700,
699,
688,
677,
666,
655,
644,
633,
622,
611,
600,
599,
585,
571,
557,
542,
528,
514,
500,
499,
485,
471,
457,
442,
428,
414,
400,
399,
379,
359,
340,
320,
300,
299,
279,
259,
240,
220,
200,
199,
166,
133,
100,
99,
66,
33,
0,
]
| 200 | 0 |
from bisect import bisect
from itertools import accumulate
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
__lowerCamelCase : int = sorted(zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) , key=lambda SCREAMING_SNAKE_CASE__ : x[0] / x[1] , reverse=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase , __lowerCamelCase : Dict = [i[0] for i in r], [i[1] for i in r]
__lowerCamelCase : Optional[Any] = list(accumulate(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase : Union[str, Any] = bisect(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return (
0
if k == 0
else sum(vl[:k] ) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
if k != n
else sum(vl[:k] )
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 194 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
lowercase_ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['ConvNextFeatureExtractor']
lowercase_ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 194 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' ,'''False''' ) ) is not True ,reason='''Skipping test because should only be run when releasing minor transformers version''' ,)
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 650, '''eval_accuracy''': 0.6, '''eval_loss''': 0.9},
},
{
'''framework''': '''tensorflow''',
'''script''': '''run_tf.py''',
'''model_name_or_path''': '''distilbert-base-cased''',
'''instance_type''': '''ml.g4dn.xlarge''',
'''results''': {'''train_runtime''': 600, '''eval_accuracy''': 0.3, '''eval_loss''': 0.9},
},
] )
class _UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self ) -> str:
'''simple docstring'''
if self.framework == "pytorch":
subprocess.run(
F"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split() , encoding='''utf-8''' , check=__UpperCamelCase , )
assert hasattr(self , '''env''' )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__=1 ) -> Optional[int]:
'''simple docstring'''
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"{self.env.base_job_name}-single" , instance_count=__UpperCamelCase , instance_type=self.instance_type , debugger_hook_config=__UpperCamelCase , hyperparameters={**self.env.hyperparameters, '''model_name_or_path''': self.model_name_or_path} , metric_definitions=self.env.metric_definitions , py_version='''py36''' , )
def _SCREAMING_SNAKE_CASE ( self , lowerCAmelCase__ ) -> int:
'''simple docstring'''
TrainingJobAnalytics(__UpperCamelCase ).export_csv(F"{self.env.test_path}/{job_name}_metrics.csv" )
def _SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
__lowercase = self.create_estimator()
# run training
estimator.fit()
# result dataframe
__lowercase = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
__lowercase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
__lowercase = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
__lowercase = (
Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"{estimator.latest_training_job.name}.json" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , __UpperCamelCase ) | 210 | import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Tuple = logging.get_logger(__name__)
lowerCAmelCase__ : Union[str, Any] = {'''vocab_file''': '''sentencepiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase__ : Dict = {
'''vocab_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/sentencepiece.model''',
},
'''tokenizer_file''': {
'''google/rembert''': '''https://huggingface.co/google/rembert/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase__ : List[Any] = {
'''google/rembert''': 2_56,
}
lowerCAmelCase__ : List[str] = '''▁'''
class __snake_case ( _lowerCamelCase ):
__lowerCamelCase = VOCAB_FILES_NAMES
__lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase = RemBertTokenizer
def __init__( self , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=True , __UpperCamelCase=True , __UpperCamelCase=False , __UpperCamelCase="[CLS]" , __UpperCamelCase="[SEP]" , __UpperCamelCase="<unk>" , __UpperCamelCase="[SEP]" , __UpperCamelCase="<pad>" , __UpperCamelCase="[CLS]" , __UpperCamelCase="[MASK]" , **__UpperCamelCase , ) -> Tuple:
'''simple docstring'''
snake_case__ : Optional[Any] = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
super().__init__(
__UpperCamelCase , tokenizer_file=__UpperCamelCase , do_lower_case=__UpperCamelCase , remove_space=__UpperCamelCase , keep_accents=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , **__UpperCamelCase , )
snake_case__ : int = do_lower_case
snake_case__ : Any = remove_space
snake_case__ : List[Any] = keep_accents
snake_case__ : Dict = vocab_file
snake_case__ : int = False if not self.vocab_file else True
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
snake_case__ : Dict = [self.sep_token_id]
snake_case__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __a ( self , __UpperCamelCase , __UpperCamelCase = None , __UpperCamelCase = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1]
return [1] + ([0] * len(__UpperCamelCase )) + [1]
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> List[int]:
'''simple docstring'''
snake_case__ : List[Any] = [self.sep_token_id]
snake_case__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __a ( self , __UpperCamelCase , __UpperCamelCase = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error('Vocabulary path ({}) should be a directory'.format(__UpperCamelCase ) )
return
snake_case__ : List[str] = os.path.join(
__UpperCamelCase , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ):
copyfile(self.vocab_file , __UpperCamelCase )
return (out_vocab_file,)
| 143 | 0 |
from __future__ import annotations
def _snake_case( SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Tuple ) -> Optional[Any]:
'''simple docstring'''
print(f'Vertex\tShortest Distance from vertex {src}' )
for i, d in enumerate(__lowerCAmelCase ):
print(f'{i}\t\t{d}' )
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Dict:
'''simple docstring'''
for j in range(__lowerCAmelCase ):
A__ = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
return True
return False
def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : Tuple ) -> list[float]:
'''simple docstring'''
A__ = [float('inf' )] * vertex_count
A__ = 0.0
for _ in range(vertex_count - 1 ):
for j in range(__lowerCAmelCase ):
A__ = (graph[j][k] for k in ['''src''', '''dst''', '''weight'''])
if distance[u] != float('inf' ) and distance[u] + w < distance[v]:
A__ = distance[u] + w
A__ = check_negative_cycle(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
if negative_cycle_exists:
raise Exception('Negative cycle found' )
return distance
if __name__ == "__main__":
import doctest
doctest.testmod()
lowercase_ = int(input("Enter number of vertices: ").strip())
lowercase_ = int(input("Enter number of edges: ").strip())
lowercase_ = [{} for _ in range(E)]
for i in range(E):
print("Edge ", i + 1)
lowercase_ , lowercase_ , lowercase_ = (
int(x)
for x in input("Enter source, destination, weight: ").strip().split(" ")
)
lowercase_ = {"src": src, "dst": dest, "weight": weight}
lowercase_ = int(input("\nEnter shortest path source:").strip())
lowercase_ = bellman_ford(graph, V, E, source)
print_distance(shortest_distance, 0)
| 362 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
lowercase_ = getLogger(__name__)
lowercase_ = "cuda" if torch.cuda.is_available() else "cpu"
def _snake_case( SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : int = 8 , SCREAMING_SNAKE_CASE__ : str = DEFAULT_DEVICE , SCREAMING_SNAKE_CASE__ : Optional[Any]=False , SCREAMING_SNAKE_CASE__ : Dict="summarization" , SCREAMING_SNAKE_CASE__ : Any=None , **SCREAMING_SNAKE_CASE__ : str , ) -> Dict:
'''simple docstring'''
A__ = Path(SCREAMING_SNAKE_CASE__ ).open('w' , encoding='utf-8' )
A__ = str(SCREAMING_SNAKE_CASE__ )
A__ = AutoModelForSeqaSeqLM.from_pretrained(SCREAMING_SNAKE_CASE__ ).to(SCREAMING_SNAKE_CASE__ )
if fpaa:
A__ = model.half()
A__ = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE__ )
logger.info(f'Inferred tokenizer type: {tokenizer.__class__}' ) # if this is wrong, check config.model_type.
A__ = time.time()
# update config with task specific params
use_task_specific_params(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if prefix is None:
A__ = prefix or getattr(model.config , 'prefix' , '' ) or ''
for examples_chunk in tqdm(list(chunks(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) ) ):
A__ = [prefix + text for text in examples_chunk]
A__ = tokenizer(SCREAMING_SNAKE_CASE__ , return_tensors='pt' , truncation=SCREAMING_SNAKE_CASE__ , padding='longest' ).to(SCREAMING_SNAKE_CASE__ )
A__ = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **SCREAMING_SNAKE_CASE__ , )
A__ = tokenizer.batch_decode(SCREAMING_SNAKE_CASE__ , skip_special_tokens=SCREAMING_SNAKE_CASE__ , clean_up_tokenization_spaces=SCREAMING_SNAKE_CASE__ )
for hypothesis in dec:
fout.write(hypothesis + '\n' )
fout.flush()
fout.close()
A__ = int(time.time() - start_time ) # seconds
A__ = len(SCREAMING_SNAKE_CASE__ )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def _snake_case( ) -> Tuple:
'''simple docstring'''
return datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S' )
def _snake_case( SCREAMING_SNAKE_CASE__ : Optional[int]=True ) -> Dict:
'''simple docstring'''
A__ = argparse.ArgumentParser()
parser.add_argument('model_name' , type=SCREAMING_SNAKE_CASE__ , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('input_path' , type=SCREAMING_SNAKE_CASE__ , help='like cnn_dm/test.source' )
parser.add_argument('save_path' , type=SCREAMING_SNAKE_CASE__ , help='where to save summaries' )
parser.add_argument('--reference_path' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , help='like cnn_dm/test.target' )
parser.add_argument('--score_path' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , default='metrics.json' , help='where to save metrics' )
parser.add_argument('--device' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help='cuda, cuda:1, cpu etc.' )
parser.add_argument(
'--prefix' , type=SCREAMING_SNAKE_CASE__ , required=SCREAMING_SNAKE_CASE__ , default=SCREAMING_SNAKE_CASE__ , help='will be added to the begininng of src examples' )
parser.add_argument('--task' , type=SCREAMING_SNAKE_CASE__ , default='summarization' , help='used for task_specific_params + metrics' )
parser.add_argument('--bs' , type=SCREAMING_SNAKE_CASE__ , default=8 , required=SCREAMING_SNAKE_CASE__ , help='batch size' )
parser.add_argument(
'--n_obs' , type=SCREAMING_SNAKE_CASE__ , default=-1 , required=SCREAMING_SNAKE_CASE__ , help='How many observations. Defaults to all.' )
parser.add_argument('--fp16' , action='store_true' )
parser.add_argument('--dump-args' , action='store_true' , help='print the custom hparams with the results' )
parser.add_argument(
'--info' , nargs='?' , type=SCREAMING_SNAKE_CASE__ , const=datetime_now() , help=(
'use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'
' lang=en-ru. If no value is passed, the current datetime string will be used.'
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
A__ , A__ = parser.parse_known_args()
A__ = parse_numeric_n_bool_cl_kwargs(SCREAMING_SNAKE_CASE__ )
if parsed_args and verbose:
print(f'parsed the following generate kwargs: {parsed_args}' )
A__ = [' ' + x.rstrip() if 't5' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
A__ = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f'score_path {args.score_path} will be overwritten unless you type ctrl-c.' )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError('Can\'t mix --fp16 and --device cpu' )
A__ = generate_summaries_or_translations(
SCREAMING_SNAKE_CASE__ , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **SCREAMING_SNAKE_CASE__ , )
if args.reference_path is None:
return {}
# Compute scores
A__ = calculate_bleu if 'translation' in args.task else calculate_rouge
A__ = [x.rstrip() for x in open(args.save_path ).readlines()]
A__ = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(SCREAMING_SNAKE_CASE__ )]
A__ = score_fn(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
scores.update(SCREAMING_SNAKE_CASE__ )
if args.dump_args:
scores.update(SCREAMING_SNAKE_CASE__ )
if args.info:
A__ = args.info
if verbose:
print(SCREAMING_SNAKE_CASE__ )
if args.score_path is not None:
json.dump(SCREAMING_SNAKE_CASE__ , open(args.score_path , 'w' ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 282 | 0 |
from argparse import ArgumentParser, Namespace
from typing import Any, List, Optional
from ..pipelines import Pipeline, get_supported_tasks, pipeline
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from fastapi import Body, FastAPI, HTTPException
from fastapi.routing import APIRoute
from pydantic import BaseModel
from starlette.responses import JSONResponse
from uvicorn import run
_UpperCAmelCase : Any = True
except (ImportError, AttributeError):
_UpperCAmelCase : Any = object
def UpperCAmelCase__ ( *lowerCamelCase, **lowerCamelCase ):
pass
_UpperCAmelCase : Any = False
_UpperCAmelCase : Dict = logging.get_logger("transformers-cli/serving")
def UpperCAmelCase__ ( lowerCamelCase ):
lowercase :Optional[int] = pipeline(
task=args.task, model=args.model if args.model else None, config=args.config, tokenizer=args.tokenizer, device=args.device, )
return ServeCommand(lowerCamelCase, args.host, args.port, args.workers )
class __lowerCAmelCase ( lowerCAmelCase):
_a = 42
class __lowerCAmelCase ( lowerCAmelCase):
_a = 42
_a = 42
class __lowerCAmelCase ( lowerCAmelCase):
_a = 42
class __lowerCAmelCase ( lowerCAmelCase):
_a = 42
class __lowerCAmelCase ( lowerCAmelCase):
@staticmethod
def SCREAMING_SNAKE_CASE ( _lowerCAmelCase: ArgumentParser ):
lowercase :Optional[int] = parser.add_parser(
"serve" , help="CLI tool to run inference requests through REST and GraphQL endpoints." )
serve_parser.add_argument(
"--task" , type=_lowerCAmelCase , choices=get_supported_tasks() , help="The task to run the pipeline on" , )
serve_parser.add_argument("--host" , type=_lowerCAmelCase , default="localhost" , help="Interface the server will listen on." )
serve_parser.add_argument("--port" , type=_lowerCAmelCase , default=88_88 , help="Port the serving will listen to." )
serve_parser.add_argument("--workers" , type=_lowerCAmelCase , default=1 , help="Number of http workers" )
serve_parser.add_argument("--model" , type=_lowerCAmelCase , help="Model's name or path to stored model." )
serve_parser.add_argument("--config" , type=_lowerCAmelCase , help="Model's config name or path to stored model." )
serve_parser.add_argument("--tokenizer" , type=_lowerCAmelCase , help="Tokenizer name to use." )
serve_parser.add_argument(
"--device" , type=_lowerCAmelCase , default=-1 , help="Indicate the device to run onto, -1 indicates CPU, >= 0 indicates GPU (default: -1)" , )
serve_parser.set_defaults(func=_lowerCAmelCase )
def __init__( self: Any , _lowerCAmelCase: Pipeline , _lowerCAmelCase: str , _lowerCAmelCase: int , _lowerCAmelCase: int ):
lowercase :Any = pipeline
lowercase :Optional[Any] = host
lowercase :Dict = port
lowercase :List[Any] = workers
if not _serve_dependencies_installed:
raise RuntimeError(
"Using serve command requires FastAPI and uvicorn. "
"Please install transformers with [serving]: pip install \"transformers[serving]\"."
"Or install FastAPI and uvicorn separately." )
else:
logger.info(F"Serving model over {host}:{port}" )
lowercase :List[str] = FastAPI(
routes=[
APIRoute(
"/" , self.model_info , response_model=_lowerCAmelCase , response_class=_lowerCAmelCase , methods=["GET"] , ),
APIRoute(
"/tokenize" , self.tokenize , response_model=_lowerCAmelCase , response_class=_lowerCAmelCase , methods=["POST"] , ),
APIRoute(
"/detokenize" , self.detokenize , response_model=_lowerCAmelCase , response_class=_lowerCAmelCase , methods=["POST"] , ),
APIRoute(
"/forward" , self.forward , response_model=_lowerCAmelCase , response_class=_lowerCAmelCase , methods=["POST"] , ),
] , timeout=6_00 , )
def SCREAMING_SNAKE_CASE ( self: Any ):
run(self._app , host=self.host , port=self.port , workers=self.workers )
def SCREAMING_SNAKE_CASE ( self: Union[str, Any] ):
return ServeModelInfoResult(infos=vars(self._pipeline.model.config ) )
def SCREAMING_SNAKE_CASE ( self: List[str] , _lowerCAmelCase: str = Body(_lowerCAmelCase , embed=_lowerCAmelCase ) , _lowerCAmelCase: bool = Body(_lowerCAmelCase , embed=_lowerCAmelCase ) ):
try:
lowercase :Optional[int] = self._pipeline.tokenizer.tokenize(_lowerCAmelCase )
if return_ids:
lowercase :int = self._pipeline.tokenizer.convert_tokens_to_ids(_lowerCAmelCase )
return ServeTokenizeResult(tokens=_lowerCAmelCase , tokens_ids=_lowerCAmelCase )
else:
return ServeTokenizeResult(tokens=_lowerCAmelCase )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"model": "", "error": str(_lowerCAmelCase )} )
def SCREAMING_SNAKE_CASE ( self: str , _lowerCAmelCase: List[int] = Body(_lowerCAmelCase , embed=_lowerCAmelCase ) , _lowerCAmelCase: bool = Body(_lowerCAmelCase , embed=_lowerCAmelCase ) , _lowerCAmelCase: bool = Body(_lowerCAmelCase , embed=_lowerCAmelCase ) , ):
try:
lowercase :Optional[Any] = self._pipeline.tokenizer.decode(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
return ServeDeTokenizeResult(model="" , text=_lowerCAmelCase )
except Exception as e:
raise HTTPException(status_code=5_00 , detail={"model": "", "error": str(_lowerCAmelCase )} )
async def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: Union[str, Any]=Body(_lowerCAmelCase , embed=_lowerCAmelCase ) ):
# Check we don't have empty string
if len(_lowerCAmelCase ) == 0:
return ServeForwardResult(output=[] , attention=[] )
try:
# Forward through the model
lowercase :Optional[int] = self._pipeline(_lowerCAmelCase )
return ServeForwardResult(output=_lowerCAmelCase )
except Exception as e:
raise HTTPException(5_00 , {"error": str(_lowerCAmelCase )} )
| 236 |
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# Return True if there is node that has not iterated.
lowercase :Union[str, Any] = [False] * len(lowerCamelCase )
lowercase :Union[str, Any] = []
queue.append(lowerCamelCase )
lowercase :Optional[Any] = True
while queue:
lowercase :Optional[Any] = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(lowerCamelCase )
lowercase :Dict = True
lowercase :Dict = u
return visited[t]
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
# This array is filled by BFS and to store path
lowercase :Optional[int] = [-1] * (len(lowerCamelCase ))
lowercase :int = 0
while bfs(lowerCamelCase, lowerCamelCase, lowerCamelCase, lowerCamelCase ):
lowercase :int = float("Inf" )
lowercase :Any = sink
while s != source:
# Find the minimum value in select path
lowercase :Any = min(lowerCamelCase, graph[parent[s]][s] )
lowercase :Dict = parent[s]
max_flow += path_flow
lowercase :Union[str, Any] = sink
while v != source:
lowercase :List[Any] = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
lowercase :Any = parent[v]
return max_flow
_UpperCAmelCase : Optional[Any] = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_UpperCAmelCase , _UpperCAmelCase : Any = 0, 5
print(ford_fulkerson(graph, source, sink))
| 236 | 1 |
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
return " ".join(input_str.split()[::-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 224 |
def __lowerCAmelCase ( SCREAMING_SNAKE_CASE_ ):
lowercase__ = len(SCREAMING_SNAKE_CASE_ )
for i in range(SCREAMING_SNAKE_CASE_ ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE_ ):
if numbers[j] < numbers[i]:
lowercase__ , lowercase__ = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
lowercase_ = input("""Enter numbers separated by a comma:\n""").strip()
lowercase_ = [int(item) for item in user_input.split(""",""")]
print(exchange_sort(unsorted))
| 224 | 1 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
_a = logging.get_logger(__name__)
@add_end_docstrings(A_ )
class __A ( A_ ):
'''simple docstring'''
def __init__( self , *__lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
super().__init__(*snake_case_ , **snake_case_ )
requires_backends(self , '''vision''' )
self.check_model_type(
TF_MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING
if self.framework == '''tf'''
else MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING )
def __lowerCamelCase ( self , __lowerCAmelCase=None ):
'''simple docstring'''
lowerCamelCase__ = {}
if top_k is not None:
lowerCamelCase__ = top_k
return {}, {}, postprocess_params
def __call__( self , __lowerCAmelCase , **__lowerCAmelCase ):
'''simple docstring'''
return super().__call__(snake_case_ , **snake_case_ )
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = load_image(snake_case_ )
lowerCamelCase__ = self.image_processor(images=snake_case_ , return_tensors=self.framework )
return model_inputs
def __lowerCamelCase ( self , __lowerCAmelCase ):
'''simple docstring'''
lowerCamelCase__ = self.model(**snake_case_ )
return model_outputs
def __lowerCamelCase ( self , __lowerCAmelCase , __lowerCAmelCase=5 ):
'''simple docstring'''
if top_k > self.model.config.num_labels:
lowerCamelCase__ = self.model.config.num_labels
if self.framework == "pt":
lowerCamelCase__ = model_outputs.logits.softmax(-1 )[0]
lowerCamelCase__ , lowerCamelCase__ = probs.topk(snake_case_ )
elif self.framework == "tf":
lowerCamelCase__ = stable_softmax(model_outputs.logits , axis=-1 )[0]
lowerCamelCase__ = tf.math.top_k(snake_case_ , k=snake_case_ )
lowerCamelCase__ , lowerCamelCase__ = topk.values.numpy(), topk.indices.numpy()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
lowerCamelCase__ = scores.tolist()
lowerCamelCase__ = ids.tolist()
return [{"score": score, "label": self.model.config.idalabel[_id]} for score, _id in zip(snake_case_ , snake_case_ )]
| 209 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class UpperCAmelCase_ ( A_ ):
lowercase__ = '''openai/whisper-base'''
lowercase__ = (
'''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
'''transcribed text.'''
)
lowercase__ = '''transcriber'''
lowercase__ = WhisperProcessor
lowercase__ = WhisperForConditionalGeneration
lowercase__ = ['''audio''']
lowercase__ = ['''text''']
def __magic_name__ ( self : Optional[int] , snake_case_ : Dict ) -> int:
'''simple docstring'''
return self.pre_processor(snake_case_ , return_tensors="pt" ).input_features
def __magic_name__ ( self : int , snake_case_ : int ) -> str:
'''simple docstring'''
return self.model.generate(inputs=snake_case_ )
def __magic_name__ ( self : Tuple , snake_case_ : Optional[Any] ) -> Any:
'''simple docstring'''
return self.pre_processor.batch_decode(snake_case_ , skip_special_tokens=snake_case_ )[0]
| 247 | 0 |
def __lowercase ( lowerCamelCase : int ):
UpperCamelCase_ : int = abs(lowerCamelCase )
UpperCamelCase_ : Any = 0
while n > 0:
res += n % 10
n //= 10
return res
def __lowercase ( lowerCamelCase : int ):
UpperCamelCase_ : Any = abs(lowerCamelCase )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def __lowercase ( lowerCamelCase : int ):
return sum(int(lowerCamelCase ) for c in str(abs(lowerCamelCase ) ) )
def __lowercase ( ):
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(lowerCamelCase : Callable , lowerCamelCase : int ) -> None:
UpperCamelCase_ : Any = F"{func.__name__}({value})"
UpperCamelCase_ : int = timeit(F"__main__.{call}" , setup='import __main__' )
print(F"{call:56} = {func(lowerCamelCase )} -- {timing:.4f} seconds" )
for value in (262144, 1125899906842624, 1267650600228229401496703205376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(lowerCamelCase , lowerCamelCase )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 370 | import baseaa
def __lowercase ( lowerCamelCase : str ):
return baseaa.baaencode(string.encode('utf-8' ) )
def __lowercase ( lowerCamelCase : bytes ):
return baseaa.baadecode(lowerCamelCase ).decode('utf-8' )
if __name__ == "__main__":
a_ = 'Hello World!'
a_ = baseaa_encode(test)
print(encoded)
a_ = baseaa_decode(encoded)
print(decoded)
| 50 | 0 |
"""simple docstring"""
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=1 ) -> Any:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> Optional[Any]:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item.replace('in_layers.0' , 'norm1' )
snake_case_ = new_item.replace('in_layers.2' , 'conv1' )
snake_case_ = new_item.replace('out_layers.0' , 'norm2' )
snake_case_ = new_item.replace('out_layers.3' , 'conv2' )
snake_case_ = new_item.replace('emb_layers.1' , 'time_emb_proj' )
snake_case_ = new_item.replace('skip_connection' , 'conv_shortcut' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase=0 ) -> List[Any]:
snake_case_ = []
for old_item in old_list:
snake_case_ = old_item
snake_case_ = new_item.replace('norm.weight' , 'group_norm.weight' )
snake_case_ = new_item.replace('norm.bias' , 'group_norm.bias' )
snake_case_ = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
snake_case_ = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
snake_case_ = shave_segments(UpperCAmelCase , n_shave_prefix_segments=UpperCAmelCase )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None ) -> Any:
assert isinstance(UpperCAmelCase , UpperCAmelCase ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
snake_case_ = old_checkpoint[path]
snake_case_ = old_tensor.shape[0] // 3
snake_case_ = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
snake_case_ = old_tensor.shape[0] // config['num_head_channels'] // 3
snake_case_ = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
snake_case_ , snake_case_ , snake_case_ = old_tensor.split(channels // num_heads , dim=1 )
snake_case_ = query.reshape(UpperCAmelCase )
snake_case_ = key.reshape(UpperCAmelCase )
snake_case_ = value.reshape(UpperCAmelCase )
for path in paths:
snake_case_ = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
snake_case_ = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
snake_case_ = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
snake_case_ = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
snake_case_ = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
snake_case_ = old_checkpoint[path['old']][:, :, 0]
else:
snake_case_ = old_checkpoint[path['old']]
def UpperCAmelCase ( UpperCAmelCase , UpperCAmelCase ) -> Optional[Any]:
snake_case_ = {}
snake_case_ = checkpoint['time_embed.0.weight']
snake_case_ = checkpoint['time_embed.0.bias']
snake_case_ = checkpoint['time_embed.2.weight']
snake_case_ = checkpoint['time_embed.2.bias']
snake_case_ = checkpoint['input_blocks.0.0.weight']
snake_case_ = checkpoint['input_blocks.0.0.bias']
snake_case_ = checkpoint['out.0.weight']
snake_case_ = checkpoint['out.0.bias']
snake_case_ = checkpoint['out.2.weight']
snake_case_ = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'input_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the middle blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'middle_block.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
# Retrieves the keys for the output blocks only
snake_case_ = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
snake_case_ = {
layer_id: [key for key in checkpoint if f'output_blocks.{layer_id}' in key]
for layer_id in range(UpperCAmelCase )
}
for i in range(1 , UpperCAmelCase ):
snake_case_ = (i - 1) // (config['num_res_blocks'] + 1)
snake_case_ = (i - 1) % (config['num_res_blocks'] + 1)
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.0' in key]
snake_case_ = [key for key in input_blocks[i] if f'input_blocks.{i}.1' in key]
if f'input_blocks.{i}.0.op.weight' in checkpoint:
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.weight'
]
snake_case_ = checkpoint[
f'input_blocks.{i}.0.op.bias'
]
continue
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'input_blocks.{i}.0', 'new': f'down_blocks.{block_id}.resnets.{layer_in_block_id}'}
snake_case_ = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path, resnet_op] , config=UpperCAmelCase )
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'input_blocks.{i}.1',
'new': f'down_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'input_blocks.{i}.1.qkv.bias': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'input_blocks.{i}.1.qkv.weight': {
'key': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase , )
snake_case_ = middle_blocks[0]
snake_case_ = middle_blocks[1]
snake_case_ = middle_blocks[2]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , config=UpperCAmelCase )
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , attention_paths_to_split=UpperCAmelCase , config=UpperCAmelCase )
for i in range(UpperCAmelCase ):
snake_case_ = i // (config['num_res_blocks'] + 1)
snake_case_ = i % (config['num_res_blocks'] + 1)
snake_case_ = [shave_segments(UpperCAmelCase , 2 ) for name in output_blocks[i]]
snake_case_ = {}
for layer in output_block_layers:
snake_case_ , snake_case_ = layer.split('.' )[0], shave_segments(UpperCAmelCase , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(UpperCAmelCase )
else:
snake_case_ = [layer_name]
if len(UpperCAmelCase ) > 1:
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.0' in key]
snake_case_ = [key for key in output_blocks[i] if f'output_blocks.{i}.1' in key]
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = renew_resnet_paths(UpperCAmelCase )
snake_case_ = {'old': f'output_blocks.{i}.0', 'new': f'up_blocks.{block_id}.resnets.{layer_in_block_id}'}
assign_to_checkpoint(UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , config=UpperCAmelCase )
if ["conv.weight", "conv.bias"] in output_block_list.values():
snake_case_ = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.weight'
]
snake_case_ = checkpoint[
f'output_blocks.{i}.{index}.conv.bias'
]
# Clear attentions as they have been attributed above.
if len(UpperCAmelCase ) == 2:
snake_case_ = []
if len(UpperCAmelCase ):
snake_case_ = renew_attention_paths(UpperCAmelCase )
snake_case_ = {
'old': f'output_blocks.{i}.1',
'new': f'up_blocks.{block_id}.attentions.{layer_in_block_id}',
}
snake_case_ = {
f'output_blocks.{i}.1.qkv.bias': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias',
},
f'output_blocks.{i}.1.qkv.weight': {
'key': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight',
'query': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight',
'value': f'up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight',
},
}
assign_to_checkpoint(
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=UpperCAmelCase , )
else:
snake_case_ = renew_resnet_paths(UpperCAmelCase , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
snake_case_ = '.'.join(['output_blocks', str(UpperCAmelCase ), path['old']] )
snake_case_ = '.'.join(['up_blocks', str(UpperCAmelCase ), 'resnets', str(UpperCAmelCase ), path['new']] )
snake_case_ = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__UpperCamelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__UpperCamelCase = parser.parse_args()
__UpperCamelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__UpperCamelCase = json.loads(f.read())
__UpperCamelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__UpperCamelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__UpperCamelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__UpperCamelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 69 |
from __future__ import annotations
from typing import Any
class UpperCamelCase__ :
def __init__(self : Union[str, Any] , snake_case_ : int ):
__a : Dict = num_of_nodes
__a : list[list[int]] = []
__a : dict[int, int] = {}
def lowerCAmelCase (self : Optional[Any] , snake_case_ : int , snake_case_ : int , snake_case_ : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowerCAmelCase (self : Any , snake_case_ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowerCAmelCase (self : str , snake_case_ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
__a : Optional[int] = self.find_component(snake_case_ )
def lowerCAmelCase (self : Any , snake_case_ : list[int] , snake_case_ : int , snake_case_ : int ):
if component_size[u_node] <= component_size[v_node]:
__a : List[str] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(snake_case_ )
elif component_size[u_node] >= component_size[v_node]:
__a : Optional[int] = self.find_component(snake_case_ )
component_size[u_node] += component_size[v_node]
self.set_component(snake_case_ )
def lowerCAmelCase (self : Optional[Any] ):
__a : str = []
__a : int = 0
__a : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
__a : Union[str, Any] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
__a , __a , __a : Optional[Any] = edge
__a : List[str] = self.m_component[u]
__a : List[Any] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
__a : str = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(snake_case_ , snake_case_ ):
__a , __a , __a : str = edge
__a : Any = self.m_component[u]
__a : Any = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(snake_case_ , snake_case_ , snake_case_ )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
__a : Optional[int] = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def __UpperCamelCase ( ):
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 216 | 0 |
"""simple docstring"""
def _snake_case ( ):
for n in range(1 , 1_000_000 ):
yield n * (n + 1) // 2
def _snake_case ( lowerCamelCase__ : Dict ):
lowerCamelCase_ : Optional[int] =1
lowerCamelCase_ : Tuple =2
while i * i <= n:
lowerCamelCase_ : List[str] =0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def _snake_case ( ):
return next(i for i in triangle_number_generator() if count_divisors(UpperCamelCase__ ) > 500 )
if __name__ == "__main__":
print(solution())
| 352 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import List, Optional
import pyarrow as pa
import pyarrow.parquet as pq
import datasets
from datasets.table import table_cast
A__ : List[str] = datasets.utils.logging.get_logger(__name__)
@dataclass
class lowercase__ ( datasets.BuilderConfig ):
_UpperCAmelCase :int = 10000
_UpperCAmelCase :Optional[List[str]] = None
_UpperCAmelCase :Optional[datasets.Features] = None
class lowercase__ ( datasets.ArrowBasedBuilder ):
_UpperCAmelCase :Optional[int] = ParquetConfig
def UpperCAmelCase__ ( self : Optional[int] ):
return datasets.DatasetInfo(features=self.config.features )
def UpperCAmelCase__ ( self : List[Any] , snake_case__ : List[Any] ):
if not self.config.data_files:
raise ValueError(F"""At least one data file must be specified, but got data_files={self.config.data_files}""" )
lowerCamelCase_ : str =dl_manager.download_and_extract(self.config.data_files )
if isinstance(snake_case__ , (str, list, tuple) ):
lowerCamelCase_ : Dict =data_files
if isinstance(snake_case__ , snake_case__ ):
lowerCamelCase_ : List[Any] =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCamelCase_ : Any =[dl_manager.iter_files(snake_case__ ) for file in files]
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"files": files} )]
lowerCamelCase_ : Optional[int] =[]
for split_name, files in data_files.items():
if isinstance(snake_case__ , snake_case__ ):
lowerCamelCase_ : Optional[int] =[files]
# Use `dl_manager.iter_files` to skip hidden files in an extracted archive
lowerCamelCase_ : int =[dl_manager.iter_files(snake_case__ ) for file in files]
# Infer features is they are stoed in the arrow schema
if self.info.features is None:
for file in itertools.chain.from_iterable(snake_case__ ):
with open(snake_case__ , "rb" ) as f:
lowerCamelCase_ : List[Any] =datasets.Features.from_arrow_schema(pq.read_schema(snake_case__ ) )
break
splits.append(datasets.SplitGenerator(name=snake_case__ , gen_kwargs={"files": files} ) )
return splits
def UpperCAmelCase__ ( self : int , snake_case__ : pa.Table ):
if self.info.features is not None:
# more expensive cast to support nested features with keys in a different order
# allows str <-> int/float or str to Audio for example
lowerCamelCase_ : List[str] =table_cast(snake_case__ , self.info.features.arrow_schema )
return pa_table
def UpperCAmelCase__ ( self : int , snake_case__ : Optional[Any] ):
lowerCamelCase_ : Tuple =self.info.features.arrow_schema if self.info.features is not None else None
if self.info.features is not None and self.config.columns is not None:
if sorted(field.name for field in schema ) != sorted(self.config.columns ):
raise ValueError(
F"""Tried to load parquet data with columns '{self.config.columns}' with mismatching features '{self.info.features}'""" )
for file_idx, file in enumerate(itertools.chain.from_iterable(snake_case__ ) ):
with open(snake_case__ , "rb" ) as f:
lowerCamelCase_ : List[str] =pq.ParquetFile(snake_case__ )
try:
for batch_idx, record_batch in enumerate(
parquet_file.iter_batches(batch_size=self.config.batch_size , columns=self.config.columns ) ):
lowerCamelCase_ : Union[str, Any] =pa.Table.from_batches([record_batch] )
# Uncomment for debugging (will print the Arrow table size and elements)
# logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
# logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
yield F"""{file_idx}_{batch_idx}""", self._cast_table(snake_case__ )
except ValueError as e:
logger.error(F"""Failed to read file '{file}' with error {type(snake_case__ )}: {e}""" )
raise
| 209 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class A__ ( unittest.TestCase ):
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = tempfile.mkdtemp()
# fmt: off
UpperCamelCase : Union[str, Any] = ["", "l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
# fmt: on
UpperCamelCase : int = dict(zip(A_ , range(len(A_ ) ) ) )
UpperCamelCase : Optional[Any] = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
UpperCamelCase : int = {"unk_token": "<unk>"}
UpperCamelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
UpperCamelCase : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(A_ ) )
UpperCamelCase : Optional[int] = {
"do_resize": True,
"size": 20,
"do_center_crop": True,
"crop_size": 18,
"do_normalize": True,
"image_mean": [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
"image_std": [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
UpperCamelCase : Tuple = os.path.join(self.tmpdirname , A_ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(A_ , A_ )
def __UpperCamelCase( self , **A_ ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token="!" , **A_ )
def __UpperCamelCase( self , **A_ ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token="!" , **A_ )
def __UpperCamelCase( self , **A_ ):
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Tuple = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCamelCase : Optional[Any] = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = self.get_tokenizer()
UpperCamelCase : List[Any] = self.get_rust_tokenizer()
UpperCamelCase : Tuple = self.get_image_processor()
UpperCamelCase : List[Any] = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
processor_slow.save_pretrained(self.tmpdirname )
UpperCamelCase : List[str] = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )
UpperCamelCase : Optional[Any] = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
processor_fast.save_pretrained(self.tmpdirname )
UpperCamelCase : Dict = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A_ )
self.assertIsInstance(processor_fast.tokenizer , A_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A_ )
self.assertIsInstance(processor_fast.image_processor , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCamelCase : int = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
UpperCamelCase : List[str] = self.get_image_processor(do_normalize=A_ )
UpperCamelCase : Optional[int] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=A_ )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = self.get_image_processor()
UpperCamelCase : Union[str, Any] = self.get_tokenizer()
UpperCamelCase : Dict = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase : Dict = self.prepare_image_inputs()
UpperCamelCase : Optional[Any] = image_processor(A_ , return_tensors="np" )
UpperCamelCase : Tuple = processor(images=A_ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.get_image_processor()
UpperCamelCase : Dict = self.get_tokenizer()
UpperCamelCase : str = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase : Optional[Any] = "lower newer"
UpperCamelCase : List[str] = processor(text=A_ , return_tensors="np" )
UpperCamelCase : str = tokenizer(A_ , return_tensors="np" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.get_image_processor()
UpperCamelCase : int = self.get_tokenizer()
UpperCamelCase : Tuple = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase : int = "lower newer"
UpperCamelCase : List[str] = self.prepare_image_inputs()
UpperCamelCase : Any = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : int = "google/owlvit-base-patch32"
UpperCamelCase : int = OwlViTProcessor.from_pretrained(A_ )
UpperCamelCase : Union[str, Any] = ["cat", "nasa badge"]
UpperCamelCase : Any = processor(text=A_ )
UpperCamelCase : Union[str, Any] = 16
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = "google/owlvit-base-patch32"
UpperCamelCase : List[str] = OwlViTProcessor.from_pretrained(A_ )
UpperCamelCase : Dict = [["cat", "nasa badge"], ["person"]]
UpperCamelCase : Tuple = processor(text=A_ )
UpperCamelCase : str = 16
UpperCamelCase : Optional[Any] = len(A_ )
UpperCamelCase : Tuple = max([len(A_ ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : List[str] = "google/owlvit-base-patch32"
UpperCamelCase : Optional[Any] = OwlViTProcessor.from_pretrained(A_ )
UpperCamelCase : List[Any] = ["cat", "nasa badge"]
UpperCamelCase : Any = processor(text=A_ )
UpperCamelCase : Optional[int] = 16
UpperCamelCase : Tuple = inputs["input_ids"]
UpperCamelCase : int = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask"] )
self.assertEqual(inputs["input_ids"].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : str = self.get_image_processor()
UpperCamelCase : List[str] = self.get_tokenizer()
UpperCamelCase : Union[str, Any] = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase : Tuple = self.prepare_image_inputs()
UpperCamelCase : Optional[Any] = self.prepare_image_inputs()
UpperCamelCase : Tuple = processor(images=A_ , query_images=A_ )
self.assertListEqual(list(inputs.keys() ) , ["query_pixel_values", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def __UpperCamelCase( self ):
'''simple docstring'''
UpperCamelCase : Optional[int] = self.get_image_processor()
UpperCamelCase : Dict = self.get_tokenizer()
UpperCamelCase : int = OwlViTProcessor(tokenizer=A_ , image_processor=A_ )
UpperCamelCase : Optional[Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCamelCase : Dict = processor.batch_decode(A_ )
UpperCamelCase : Optional[int] = tokenizer.batch_decode(A_ )
self.assertListEqual(A_ , A_ )
| 52 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
_a = '''scheduler_config.json'''
class A_ ( snake_case__ ):
_lowercase : Optional[Any] = 1
_lowercase : Tuple = 2
_lowercase : Dict = 3
_lowercase : int = 4
_lowercase : Optional[Any] = 5
@dataclass
class A_ ( snake_case__ ):
_lowercase : jnp.ndarray
class A_ :
_lowercase : Optional[int] = SCHEDULER_CONFIG_NAME
_lowercase : Dict = ['dtype']
_lowercase : int = []
_lowercase : Union[str, Any] = True
@classmethod
def UpperCAmelCase ( cls : Union[str, Any] , UpperCAmelCase : Dict[str, Any] = None , UpperCAmelCase : Optional[str] = None , UpperCAmelCase : List[str]=False , **UpperCAmelCase : Optional[int] , ) -> Tuple:
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = cls.load_config(
pretrained_model_name_or_path=UpperCAmelCase , subfolder=UpperCAmelCase , return_unused_kwargs=UpperCAmelCase , **UpperCAmelCase , )
__lowerCAmelCase , __lowerCAmelCase: Optional[Any] = cls.from_config(UpperCAmelCase , return_unused_kwargs=UpperCAmelCase , **UpperCAmelCase )
if hasattr(UpperCAmelCase , 'create_state' ) and getattr(UpperCAmelCase , 'has_state' , UpperCAmelCase ):
__lowerCAmelCase: Dict = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def UpperCAmelCase ( self : Tuple , UpperCAmelCase : Union[str, os.PathLike] , UpperCAmelCase : bool = False , **UpperCAmelCase : Any ) -> List[str]:
self.save_config(save_directory=UpperCAmelCase , push_to_hub=UpperCAmelCase , **UpperCAmelCase )
@property
def UpperCAmelCase ( self : str ) -> Dict:
return self._get_compatibles()
@classmethod
def UpperCAmelCase ( cls : Optional[int] ) -> Any:
__lowerCAmelCase: Optional[int] = list(set([cls.__name__] + cls._compatibles ) )
__lowerCAmelCase: Dict = importlib.import_module(__name__.split('.' )[0] )
__lowerCAmelCase: Dict = [
getattr(UpperCAmelCase , UpperCAmelCase ) for c in compatible_classes_str if hasattr(UpperCAmelCase , UpperCAmelCase )
]
return compatible_classes
def _a ( SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : Tuple[int] ) -> jnp.ndarray:
"""simple docstring"""
assert len(SCREAMING_SNAKE_CASE ) >= x.ndim
return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(SCREAMING_SNAKE_CASE ) - x.ndim) ) , SCREAMING_SNAKE_CASE )
def _a ( SCREAMING_SNAKE_CASE : int , SCREAMING_SNAKE_CASE : Any=0.9_9_9 , SCREAMING_SNAKE_CASE : List[Any]=jnp.floataa ) -> jnp.ndarray:
"""simple docstring"""
def alpha_bar(SCREAMING_SNAKE_CASE : str ):
return math.cos((time_step + 0.0_0_8) / 1.0_0_8 * math.pi / 2 ) ** 2
__lowerCAmelCase: str = []
for i in range(SCREAMING_SNAKE_CASE ):
__lowerCAmelCase: Union[str, Any] = i / num_diffusion_timesteps
__lowerCAmelCase: List[str] = (i + 1) / num_diffusion_timesteps
betas.append(min(1 - alpha_bar(SCREAMING_SNAKE_CASE ) / alpha_bar(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE ) )
return jnp.array(SCREAMING_SNAKE_CASE , dtype=SCREAMING_SNAKE_CASE )
@flax.struct.dataclass
class A_ :
_lowercase : jnp.ndarray
_lowercase : jnp.ndarray
_lowercase : jnp.ndarray
@classmethod
def UpperCAmelCase ( cls : str , UpperCAmelCase : Optional[int] ) -> Any:
__lowerCAmelCase: str = scheduler.config
if config.trained_betas is not None:
__lowerCAmelCase: Tuple = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
__lowerCAmelCase: Any = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
__lowerCAmelCase: List[Any] = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
__lowerCAmelCase: str = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
F'''beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}''' )
__lowerCAmelCase: Optional[Any] = 1.0 - betas
__lowerCAmelCase: Optional[Any] = jnp.cumprod(UpperCAmelCase , axis=0 )
return cls(
alphas=UpperCAmelCase , betas=UpperCAmelCase , alphas_cumprod=UpperCAmelCase , )
def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> int:
"""simple docstring"""
__lowerCAmelCase: Optional[int] = state.alphas_cumprod
__lowerCAmelCase: str = alphas_cumprod[timesteps] ** 0.5
__lowerCAmelCase: Any = sqrt_alpha_prod.flatten()
__lowerCAmelCase: Any = broadcast_to_shape_from_left(SCREAMING_SNAKE_CASE , original_samples.shape )
__lowerCAmelCase: Any = (1 - alphas_cumprod[timesteps]) ** 0.5
__lowerCAmelCase: str = sqrt_one_minus_alpha_prod.flatten()
__lowerCAmelCase: str = broadcast_to_shape_from_left(SCREAMING_SNAKE_CASE , original_samples.shape )
return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> str:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase: Union[str, Any] = get_sqrt_alpha_prod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: Optional[int] = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
return noisy_samples
def _a ( SCREAMING_SNAKE_CASE : CommonSchedulerState , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray , SCREAMING_SNAKE_CASE : jnp.ndarray ) -> Any:
"""simple docstring"""
__lowerCAmelCase , __lowerCAmelCase: Tuple = get_sqrt_alpha_prod(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
__lowerCAmelCase: int = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
return velocity
| 322 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__A =logging.get_logger(__name__)
if is_vision_available():
import PIL
class _SCREAMING_SNAKE_CASE ( snake_case_ ):
lowerCAmelCase__ = ['pixel_values']
def __init__( self , lowercase = True , lowercase = None , lowercase = PILImageResampling.BICUBIC , lowercase = True , lowercase = None , lowercase = True , lowercase = 1 / 255 , lowercase = True , lowercase = None , lowercase = None , lowercase = True , **lowercase , ) -> None:
super().__init__(**lowercase )
lowerCamelCase_ = size if size is not None else {"shortest_edge": 224}
lowerCamelCase_ = get_size_dict(lowercase , default_to_square=lowercase )
lowerCamelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
lowerCamelCase_ = get_size_dict(lowercase , default_to_square=lowercase , param_name="crop_size" )
lowerCamelCase_ = do_resize
lowerCamelCase_ = size
lowerCamelCase_ = resample
lowerCamelCase_ = do_center_crop
lowerCamelCase_ = crop_size
lowerCamelCase_ = do_rescale
lowerCamelCase_ = rescale_factor
lowerCamelCase_ = do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCamelCase_ = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCamelCase_ = do_convert_rgb
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase = PILImageResampling.BICUBIC , lowercase = None , **lowercase , ) -> np.ndarray:
lowerCamelCase_ = get_size_dict(lowercase , default_to_square=lowercase )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
lowerCamelCase_ = get_resize_output_image_size(lowercase , size=size["shortest_edge"] , default_to_square=lowercase )
return resize(lowercase , size=lowercase , resample=lowercase , data_format=lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
lowerCamelCase_ = get_size_dict(lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(lowercase , size=(size["height"], size["width"]) , data_format=lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase = None , **lowercase , ) -> Any:
return rescale(lowercase , scale=lowercase , data_format=lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase , lowercase , lowercase = None , **lowercase , ) -> np.ndarray:
return normalize(lowercase , mean=lowercase , std=lowercase , data_format=lowercase , **lowercase )
def SCREAMING_SNAKE_CASE_( self , lowercase , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = None , lowercase = ChannelDimension.FIRST , **lowercase , ) -> PIL.Image.Image:
lowerCamelCase_ = do_resize if do_resize is not None else self.do_resize
lowerCamelCase_ = size if size is not None else self.size
lowerCamelCase_ = get_size_dict(lowercase , param_name="size" , default_to_square=lowercase )
lowerCamelCase_ = resample if resample is not None else self.resample
lowerCamelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCamelCase_ = crop_size if crop_size is not None else self.crop_size
lowerCamelCase_ = get_size_dict(lowercase , param_name="crop_size" , default_to_square=lowercase )
lowerCamelCase_ = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_ = do_normalize if do_normalize is not None else self.do_normalize
lowerCamelCase_ = image_mean if image_mean is not None else self.image_mean
lowerCamelCase_ = image_std if image_std is not None else self.image_std
lowerCamelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCamelCase_ = make_list_of_images(lowercase )
if not valid_images(lowercase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCamelCase_ = [convert_to_rgb(lowercase ) for image in images]
# All transformations expect numpy arrays.
lowerCamelCase_ = [to_numpy_array(lowercase ) for image in images]
if do_resize:
lowerCamelCase_ = [self.resize(image=lowercase , size=lowercase , resample=lowercase ) for image in images]
if do_center_crop:
lowerCamelCase_ = [self.center_crop(image=lowercase , size=lowercase ) for image in images]
if do_rescale:
lowerCamelCase_ = [self.rescale(image=lowercase , scale=lowercase ) for image in images]
if do_normalize:
lowerCamelCase_ = [self.normalize(image=lowercase , mean=lowercase , std=lowercase ) for image in images]
lowerCamelCase_ = [to_channel_dimension_format(lowercase , lowercase ) for image in images]
lowerCamelCase_ = {"pixel_values": images}
return BatchFeature(data=lowercase , tensor_type=lowercase )
| 47 |
import importlib
import torch
import yaml
from omegaconf import OmegaConf
from taming.models.vqgan import VQModel
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=False ):
lowerCamelCase_ = OmegaConf.load(lowerCamelCase__ )
if display:
print(yaml.dump(OmegaConf.to_container(lowerCamelCase__ ) ) )
return config
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=None , lowerCamelCase__=None ):
if conf_path is None:
lowerCamelCase_ = "./model_checkpoints/vqgan_only.yaml"
lowerCamelCase_ = load_config(lowerCamelCase__ , display=lowerCamelCase__ )
lowerCamelCase_ = VQModel(**config.model.params )
if ckpt_path is None:
lowerCamelCase_ = "./model_checkpoints/vqgan_only.pt"
lowerCamelCase_ = torch.load(lowerCamelCase__ , map_location=lowerCamelCase__ )
if ".ckpt" in ckpt_path:
lowerCamelCase_ = sd["state_dict"]
model.load_state_dict(lowerCamelCase__ , strict=lowerCamelCase__ )
model.to(lowerCamelCase__ )
del sd
return model
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ ):
lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ = model.encode(lowerCamelCase__ )
print(F'VQGAN --- {model.__class__.__name__}: latent shape: {z.shape[2:]}' )
lowerCamelCase_ = model.decode(lowerCamelCase__ )
return xrec
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__=False ):
lowerCamelCase_ , lowerCamelCase_ = string.rsplit("." , 1 )
if reload:
lowerCamelCase_ = importlib.import_module(lowerCamelCase__ )
importlib.reload(lowerCamelCase__ )
return getattr(importlib.import_module(lowerCamelCase__ , package=lowerCamelCase__ ) , cls )
def lowerCamelCase_ ( lowerCamelCase__ ):
if "target" not in config:
raise KeyError("Expected key `target` to instantiate." )
return get_obj_from_str(config["target"] )(**config.get("params" , {} ) )
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=True , lowerCamelCase__=True ):
lowerCamelCase_ = instantiate_from_config(lowerCamelCase__ )
if sd is not None:
model.load_state_dict(lowerCamelCase__ )
if gpu:
model.cuda()
if eval_mode:
model.eval()
return {"model": model}
def lowerCamelCase_ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ):
# load the specified checkpoint
if ckpt:
lowerCamelCase_ = torch.load(lowerCamelCase__ , map_location="cpu" )
lowerCamelCase_ = pl_sd["global_step"]
print(F'loaded model from global step {global_step}.' )
else:
lowerCamelCase_ = {"state_dict": None}
lowerCamelCase_ = None
lowerCamelCase_ = load_model_from_config(config.model , pl_sd["state_dict"] , gpu=lowerCamelCase__ , eval_mode=lowerCamelCase__ )["model"]
return model, global_step
| 47 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
a__ : str = {
"configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : int = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
a__ : int = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 161 | """simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCAmelCase_ ( _lowercase):
def __init__( self : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str]=13 , __UpperCamelCase : str=7 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : List[str]=True , __UpperCamelCase : int=True , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : List[Any]=99 , __UpperCamelCase : Dict=32 , __UpperCamelCase : int=5 , __UpperCamelCase : str=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Any=512 , __UpperCamelCase : List[str]=16 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : List[str]=0.0_2 , __UpperCamelCase : str=False , __UpperCamelCase : Dict=True , __UpperCamelCase : Tuple="None" , __UpperCamelCase : Dict=3 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Any=None , ) -> Tuple:
_UpperCamelCase = parent
_UpperCamelCase = batch_size
_UpperCamelCase = seq_length
_UpperCamelCase = is_training
_UpperCamelCase = use_input_mask
_UpperCamelCase = use_token_type_ids
_UpperCamelCase = use_labels
_UpperCamelCase = vocab_size
_UpperCamelCase = hidden_size
_UpperCamelCase = num_hidden_layers
_UpperCamelCase = num_attention_heads
_UpperCamelCase = intermediate_size
_UpperCamelCase = hidden_act
_UpperCamelCase = hidden_dropout_prob
_UpperCamelCase = attention_probs_dropout_prob
_UpperCamelCase = max_position_embeddings
_UpperCamelCase = type_vocab_size
_UpperCamelCase = type_sequence_label_size
_UpperCamelCase = initializer_range
_UpperCamelCase = num_labels
_UpperCamelCase = num_choices
_UpperCamelCase = relative_attention
_UpperCamelCase = position_biased_input
_UpperCamelCase = pos_att_type
_UpperCamelCase = scope
def _UpperCamelCase ( self : Union[str, Any] ) -> Union[str, Any]:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCamelCase = None
if self.use_input_mask:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
_UpperCamelCase = None
if self.use_token_type_ids:
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCamelCase = None
_UpperCamelCase = None
_UpperCamelCase = None
if self.use_labels:
_UpperCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCamelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCamelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCamelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _UpperCamelCase ( self : Optional[int] ) -> List[Any]:
_UpperCamelCase = self.get_config()
_UpperCamelCase = 300
return config
def _UpperCamelCase ( self : int , __UpperCamelCase : List[Any] ) -> str:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _UpperCamelCase ( self : Any , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] ) -> List[str]:
_UpperCamelCase = DebertaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )[0]
_UpperCamelCase = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )[0]
_UpperCamelCase = model(__UpperCamelCase )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _UpperCamelCase ( self : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : Optional[int] ) -> Tuple:
_UpperCamelCase = DebertaForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _UpperCamelCase ( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Tuple ) -> List[Any]:
_UpperCamelCase = self.num_labels
_UpperCamelCase = DebertaForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__UpperCamelCase )
def _UpperCamelCase ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str ) -> Dict:
_UpperCamelCase = self.num_labels
_UpperCamelCase = DebertaForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _UpperCamelCase ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ) -> List[Any]:
_UpperCamelCase = DebertaForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCamelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _UpperCamelCase ( self : Any ) -> Union[str, Any]:
_UpperCamelCase = self.prepare_config_and_inputs()
(
(
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) , (
_UpperCamelCase
) ,
) = config_and_inputs
_UpperCamelCase = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_torch
class UpperCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase):
snake_case__ = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
snake_case__ = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
snake_case__ = True
snake_case__ = False
snake_case__ = False
snake_case__ = False
snake_case__ = False
def _UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
_UpperCamelCase = DebertaModelTester(self )
_UpperCamelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def _UpperCamelCase ( self : Optional[int] ) -> int:
self.config_tester.run_common_tests()
def _UpperCamelCase ( self : Any ) -> List[str]:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__UpperCamelCase )
def _UpperCamelCase ( self : Optional[Any] ) -> Optional[int]:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__UpperCamelCase )
def _UpperCamelCase ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__UpperCamelCase )
def _UpperCamelCase ( self : Optional[int] ) -> Union[str, Any]:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__UpperCamelCase )
def _UpperCamelCase ( self : Dict ) -> Tuple:
_UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__UpperCamelCase )
@slow
def _UpperCamelCase ( self : Any ) -> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCamelCase = DebertaModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@require_torch
@require_sentencepiece
@require_tokenizers
class UpperCAmelCase_ ( unittest.TestCase):
@unittest.skip(reason='''Model not available yet''' )
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
pass
@slow
def _UpperCamelCase ( self : Tuple ) -> Union[str, Any]:
_UpperCamelCase = DebertaModel.from_pretrained('''microsoft/deberta-base''' )
_UpperCamelCase = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
_UpperCamelCase = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
_UpperCamelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase )[0]
# compare the actual values for a slice.
_UpperCamelCase = torch.tensor(
[[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __UpperCamelCase , atol=1E-4 ) , F'''{output[:, 1:4, 1:4]}''' )
| 256 | 0 |
from ..utils import DummyObject, requires_backends
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : List[str] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Union[str, Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : int = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Union[str, Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Any = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Dict = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : str = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : List[Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Dict = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Dict = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : List[Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
def lowerCAmelCase_ ( *__UpperCAmelCase: List[str] , **__UpperCAmelCase: int ) -> Optional[Any]:
requires_backends(__UpperCAmelCase , ['''torch'''] )
def lowerCAmelCase_ ( *__UpperCAmelCase: Dict , **__UpperCAmelCase: Union[str, Any] ) -> str:
requires_backends(__UpperCAmelCase , ['''torch'''] )
def lowerCAmelCase_ ( *__UpperCAmelCase: Optional[Any] , **__UpperCAmelCase: List[str] ) -> Optional[Any]:
requires_backends(__UpperCAmelCase , ['''torch'''] )
def lowerCAmelCase_ ( *__UpperCAmelCase: Any , **__UpperCAmelCase: List[Any] ) -> List[Any]:
requires_backends(__UpperCAmelCase , ['''torch'''] )
def lowerCAmelCase_ ( *__UpperCAmelCase: int , **__UpperCAmelCase: Dict ) -> int:
requires_backends(__UpperCAmelCase , ['''torch'''] )
def lowerCAmelCase_ ( *__UpperCAmelCase: List[Any] , **__UpperCAmelCase: Dict ) -> Any:
requires_backends(__UpperCAmelCase , ['''torch'''] )
def lowerCAmelCase_ ( *__UpperCAmelCase: Optional[int] , **__UpperCAmelCase: Any ) -> List[Any]:
requires_backends(__UpperCAmelCase , ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Optional[int] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Any = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : List[str] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : List[Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Optional[Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Union[str, Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : str = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : str = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Union[str, Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Tuple = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Tuple = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Optional[Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Any = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Optional[Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Any = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : List[Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> str:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Tuple = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : List[Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : List[str] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Optional[Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Union[str, Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Union[str, Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Any = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : List[str] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Optional[Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : str = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : str = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : int = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Tuple = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Optional[int] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Optional[int]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Dict = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> str:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : str = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Any:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Tuple = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Dict = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Tuple:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Any:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : List[str] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Tuple = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Optional[int]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Optional[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Any = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Dict:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> int:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : List[str] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Union[str, Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
class lowercase__ ( metaclass=__lowerCamelCase ):
'''simple docstring'''
a : Optional[Any] = ["torch"]
def __init__( self, *__magic_name__, **__magic_name__ ) -> List[Any]:
"""simple docstring"""
requires_backends(self, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> List[str]:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
@classmethod
def UpperCamelCase__ ( cls, *__magic_name__, **__magic_name__ ) -> Dict:
"""simple docstring"""
requires_backends(cls, ['''torch'''] )
| 247 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase_ = {
'configuration_xlm_roberta': [
'XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP',
'XLMRobertaConfig',
'XLMRobertaOnnxConfig',
],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['XLMRobertaTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = ['XLMRobertaTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'XLMRobertaForCausalLM',
'XLMRobertaForMaskedLM',
'XLMRobertaForMultipleChoice',
'XLMRobertaForQuestionAnswering',
'XLMRobertaForSequenceClassification',
'XLMRobertaForTokenClassification',
'XLMRobertaModel',
'XLMRobertaPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXLMRobertaForCausalLM',
'TFXLMRobertaForMaskedLM',
'TFXLMRobertaForMultipleChoice',
'TFXLMRobertaForQuestionAnswering',
'TFXLMRobertaForSequenceClassification',
'TFXLMRobertaForTokenClassification',
'TFXLMRobertaModel',
'TFXLMRobertaPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ = [
'FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxXLMRobertaForMaskedLM',
'FlaxXLMRobertaForCausalLM',
'FlaxXLMRobertaForMultipleChoice',
'FlaxXLMRobertaForQuestionAnswering',
'FlaxXLMRobertaForSequenceClassification',
'FlaxXLMRobertaForTokenClassification',
'FlaxXLMRobertaModel',
'FlaxXLMRobertaPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
XLMRobertaConfig,
XLMRobertaOnnxConfig,
)
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta import XLMRobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm_roberta import (
XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMRobertaForCausalLM,
XLMRobertaForMaskedLM,
XLMRobertaForMultipleChoice,
XLMRobertaForQuestionAnswering,
XLMRobertaForSequenceClassification,
XLMRobertaForTokenClassification,
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm_roberta import (
TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMRobertaForCausalLM,
TFXLMRobertaForMaskedLM,
TFXLMRobertaForMultipleChoice,
TFXLMRobertaForQuestionAnswering,
TFXLMRobertaForSequenceClassification,
TFXLMRobertaForTokenClassification,
TFXLMRobertaModel,
TFXLMRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xlm_roberta import (
FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxXLMRobertaForCausalLM,
FlaxXLMRobertaForMaskedLM,
FlaxXLMRobertaForMultipleChoice,
FlaxXLMRobertaForQuestionAnswering,
FlaxXLMRobertaForSequenceClassification,
FlaxXLMRobertaForTokenClassification,
FlaxXLMRobertaModel,
FlaxXLMRobertaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 247 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
UpperCAmelCase : Tuple =logging.get_logger(__name__)
UpperCAmelCase : Dict ={
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
for attribute in key.split("."):
UpperCamelCase_ = getattr(_lowerCAmelCase , _lowerCAmelCase)
if weight_type is not None:
UpperCamelCase_ = getattr(_lowerCAmelCase , _lowerCAmelCase).shape
else:
UpperCamelCase_ = hf_pointer.shape
assert hf_shape == value.shape, (
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}"""
)
if weight_type == "weight":
UpperCamelCase_ = value
elif weight_type == "weight_g":
UpperCamelCase_ = value
elif weight_type == "weight_v":
UpperCamelCase_ = value
elif weight_type == "bias":
UpperCamelCase_ = value
else:
UpperCamelCase_ = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = []
UpperCamelCase_ = fairseq_model.state_dict()
UpperCamelCase_ = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
UpperCamelCase_ = False
if "conv_layers" in name:
load_conv_layer(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , hf_model.config.feat_extract_norm == "group" , )
UpperCamelCase_ = True
else:
for key, mapped_key in MAPPING.items():
UpperCamelCase_ = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
UpperCamelCase_ = True
if "*" in mapped_key:
UpperCamelCase_ = name.split(_lowerCAmelCase)[0].split(".")[-2]
UpperCamelCase_ = mapped_key.replace("*" , _lowerCAmelCase)
if "weight_g" in name:
UpperCamelCase_ = "weight_g"
elif "weight_v" in name:
UpperCamelCase_ = "weight_v"
elif "weight" in name:
UpperCamelCase_ = "weight"
elif "bias" in name:
UpperCamelCase_ = "bias"
else:
UpperCamelCase_ = None
set_recursively(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
continue
if not is_used:
unused_weights.append(_lowerCAmelCase)
logger.warning(f"""Unused weights: {unused_weights}""")
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = full_name.split("conv_layers.")[-1]
UpperCamelCase_ = name.split(".")
UpperCamelCase_ = int(items[0])
UpperCamelCase_ = int(items[1])
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
)
UpperCamelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
)
UpperCamelCase_ = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
" found."
)
UpperCamelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
)
UpperCamelCase_ = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
else:
unused_weights.append(_lowerCAmelCase)
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = SEWConfig()
if is_finetuned:
UpperCamelCase_ = model.wav_encoder.wav_model.cfg
else:
UpperCamelCase_ = model.cfg
UpperCamelCase_ = fs_config.conv_bias
UpperCamelCase_ = eval(fs_config.conv_feature_layers)
UpperCamelCase_ = [x[0] for x in conv_layers]
UpperCamelCase_ = [x[1] for x in conv_layers]
UpperCamelCase_ = [x[2] for x in conv_layers]
UpperCamelCase_ = "gelu"
UpperCamelCase_ = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
UpperCamelCase_ = 0.0
UpperCamelCase_ = fs_config.activation_fn.name
UpperCamelCase_ = fs_config.encoder_embed_dim
UpperCamelCase_ = 0.02
UpperCamelCase_ = fs_config.encoder_ffn_embed_dim
UpperCamelCase_ = 1e-5
UpperCamelCase_ = fs_config.encoder_layerdrop
UpperCamelCase_ = fs_config.encoder_attention_heads
UpperCamelCase_ = fs_config.conv_pos_groups
UpperCamelCase_ = fs_config.conv_pos
UpperCamelCase_ = len(_lowerCAmelCase)
UpperCamelCase_ = fs_config.encoder_layers
UpperCamelCase_ = fs_config.squeeze_factor
# take care of any params that are overridden by the Wav2VecCtc model
if is_finetuned:
UpperCamelCase_ = model.cfg
UpperCamelCase_ = fs_config.final_dropout
UpperCamelCase_ = fs_config.layerdrop
UpperCamelCase_ = fs_config.activation_dropout
UpperCamelCase_ = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
UpperCamelCase_ = fs_config.attention_dropout
UpperCamelCase_ = fs_config.dropout_input
UpperCamelCase_ = fs_config.dropout
UpperCamelCase_ = fs_config.mask_channel_length
UpperCamelCase_ = fs_config.mask_channel_prob
UpperCamelCase_ = fs_config.mask_length
UpperCamelCase_ = fs_config.mask_prob
UpperCamelCase_ = "Wav2Vec2FeatureExtractor"
UpperCamelCase_ = "Wav2Vec2CTCTokenizer"
return config
@torch.no_grad()
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase=None , _lowerCAmelCase=None , _lowerCAmelCase=True):
if is_finetuned:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/")[:-1])})
else:
UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])
if config_path is not None:
UpperCamelCase_ = SEWConfig.from_pretrained(_lowerCAmelCase)
else:
UpperCamelCase_ = convert_config(model[0] , _lowerCAmelCase)
UpperCamelCase_ = model[0].eval()
UpperCamelCase_ = True if config.feat_extract_norm == "layer" else False
UpperCamelCase_ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_60_00 , padding_value=0 , do_normalize=_lowerCAmelCase , return_attention_mask=_lowerCAmelCase , )
if is_finetuned:
if dict_path:
UpperCamelCase_ = Dictionary.load(_lowerCAmelCase)
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
UpperCamelCase_ = target_dict.pad_index
UpperCamelCase_ = target_dict.bos_index
UpperCamelCase_ = target_dict.pad_index
UpperCamelCase_ = target_dict.bos_index
UpperCamelCase_ = target_dict.eos_index
UpperCamelCase_ = len(target_dict.symbols)
UpperCamelCase_ = os.path.join(_lowerCAmelCase , "vocab.json")
if not os.path.isdir(_lowerCAmelCase):
logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(_lowerCAmelCase))
return
os.makedirs(_lowerCAmelCase , exist_ok=_lowerCAmelCase)
with open(_lowerCAmelCase , "w" , encoding="utf-8") as vocab_handle:
json.dump(target_dict.indices , _lowerCAmelCase)
UpperCamelCase_ = WavaVecaCTCTokenizer(
_lowerCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="|" , do_lower_case=_lowerCAmelCase , )
UpperCamelCase_ = WavaVecaProcessor(feature_extractor=_lowerCAmelCase , tokenizer=_lowerCAmelCase)
processor.save_pretrained(_lowerCAmelCase)
UpperCamelCase_ = SEWForCTC(_lowerCAmelCase)
else:
UpperCamelCase_ = SEWModel(_lowerCAmelCase)
feature_extractor.save_pretrained(_lowerCAmelCase)
recursively_load_weights(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase)
hf_model.save_pretrained(_lowerCAmelCase)
if __name__ == "__main__":
UpperCAmelCase : List[str] =argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
UpperCAmelCase : Optional[Any] =parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
| 128 |
UpperCAmelCase : dict[tuple[int, int, int], int] ={}
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase):
# if we are absent twice, or late 3 consecutive days,
# no further prize strings are possible
if late == 3 or absent == 2:
return 0
# if we have no days left, and have not failed any other rules,
# we have a prize string
if days == 0:
return 1
# No easy solution, so now we need to do the recursive calculation
# First, check if the combination is already in the cache, and
# if yes, return the stored value from there since we already
# know the number of possible prize strings from this point on
UpperCamelCase_ = (days, absent, late)
if key in cache:
return cache[key]
# now we calculate the three possible ways that can unfold from
# this point on, depending on our attendance today
# 1) if we are late (but not absent), the "absent" counter stays as
# it is, but the "late" counter increases by one
UpperCamelCase_ = _calculate(days - 1 , _lowerCAmelCase , late + 1)
# 2) if we are absent, the "absent" counter increases by 1, and the
# "late" counter resets to 0
UpperCamelCase_ = _calculate(days - 1 , absent + 1 , 0)
# 3) if we are on time, this resets the "late" counter and keeps the
# absent counter
UpperCamelCase_ = _calculate(days - 1 , _lowerCAmelCase , 0)
UpperCamelCase_ = state_late + state_absent + state_ontime
UpperCamelCase_ = prizestrings
return prizestrings
def _lowerCAmelCase (_lowerCAmelCase = 30):
return _calculate(_lowerCAmelCase , absent=0 , late=0)
if __name__ == "__main__":
print(solution())
| 128 | 1 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : List[Any] = FileLock(str(tmpdir / "foo.lock" ) )
__snake_case : List[str] = FileLock(str(tmpdir / "foo.lock" ) )
__snake_case : Tuple = 0.0_1
with locka.acquire():
with pytest.raises(__lowerCamelCase ):
__snake_case : List[str] = time.time()
locka.acquire(__lowerCamelCase )
assert time.time() - _start > timeout
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Dict = "a" * 1_0_0_0 + ".lock"
__snake_case : Tuple = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(__lowerCamelCase )
assert len(os.path.basename(locka._lock_file ) ) <= 2_5_5
__snake_case : List[str] = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__lowerCamelCase ):
locka.acquire(0 )
| 134 |
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_snake_case : List[str] = namedtuple(
"_TestCommandArgs",
[
"dataset",
"name",
"cache_dir",
"data_dir",
"all_configs",
"save_infos",
"ignore_verifications",
"force_redownload",
"clear_cache",
],
defaults=[None, None, None, False, False, False, False, False],
)
def lowerCAmelCase_ ( __lowerCamelCase , __lowerCamelCase ):
return (abs(source - target ) / target) < 0.0_1
@pytest.mark.integration
def lowerCAmelCase_ ( __lowerCamelCase ):
__snake_case : Tuple = _TestCommandArgs(dataset=__lowerCamelCase , all_configs=__lowerCamelCase , save_infos=__lowerCamelCase )
__snake_case : List[Any] = TestCommand(*__lowerCamelCase )
test_command.run()
__snake_case : List[Any] = os.path.join(__lowerCamelCase , "README.md" )
assert os.path.exists(__lowerCamelCase )
__snake_case : Optional[Any] = DatasetInfosDict.from_directory(__lowerCamelCase )
__snake_case : List[str] = DatasetInfosDict(
{
"default": DatasetInfo(
features=Features(
{
"tokens": Sequence(Value("string" ) ),
"ner_tags": Sequence(
ClassLabel(names=["O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"] ) ),
"langs": Sequence(Value("string" ) ),
"spans": Sequence(Value("string" ) ),
} ) , splits=[
{
"name": "train",
"num_bytes": 2_3_5_1_5_6_3,
"num_examples": 1_0_0_0_0,
},
{
"name": "validation",
"num_bytes": 2_3_8_4_1_8,
"num_examples": 1_0_0_0,
},
] , download_size=3_9_4_0_6_8_0 , dataset_size=2_5_8_9_9_8_1 , )
} )
assert dataset_infos.keys() == expected_dataset_infos.keys()
for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
__snake_case , __snake_case : Tuple = getattr(dataset_infos["default"] , __lowerCamelCase ), getattr(expected_dataset_infos["default"] , __lowerCamelCase )
if key == "num_bytes":
assert is_apercent_close(__lowerCamelCase , __lowerCamelCase )
elif key == "splits":
assert list(__lowerCamelCase ) == list(__lowerCamelCase )
for split in result:
assert result[split].name == expected[split].name
assert result[split].num_examples == expected[split].num_examples
assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
else:
result == expected
| 134 | 1 |
def lowerCamelCase__ ( snake_case_ : int = 100 ) -> int:
__snake_case = n * (n + 1) * (2 * n + 1) / 6
__snake_case = (n * (n + 1) / 2) ** 2
return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(F'{solution() = }')
| 24 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase__ = logging.get_logger(__name__)
UpperCAmelCase__ = {
"s-JoL/Open-Llama-V1": "https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json",
}
class __lowerCAmelCase ( A ):
UpperCamelCase = '''open-llama'''
def __init__( self : str , A : List[Any]=10_00_00 , A : Tuple=40_96 , A : Tuple=1_10_08 , A : List[str]=32 , A : Tuple=32 , A : Optional[Any]="silu" , A : int=20_48 , A : Optional[Any]=0.0_2 , A : Dict=1E-6 , A : Optional[Any]=True , A : List[Any]=0 , A : Dict=1 , A : int=2 , A : Dict=False , A : Optional[int]=True , A : List[Any]=0.1 , A : str=0.1 , A : Dict=True , A : Optional[Any]=True , A : Dict=None , **A : Union[str, Any] , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = hidden_size
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = rms_norm_eps
_UpperCAmelCase = use_cache
_UpperCAmelCase = kwargs.pop(
'use_memorry_efficient_attention' , A)
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_dropout_prob
_UpperCAmelCase = use_stable_embedding
_UpperCAmelCase = shared_input_output_embedding
_UpperCAmelCase = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A , )
def _lowerCamelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , A) or len(self.rope_scaling) != 2:
raise ValueError(
'`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
F"got {self.rope_scaling}")
_UpperCAmelCase = self.rope_scaling.get('type' , A)
_UpperCAmelCase = self.rope_scaling.get('factor' , A)
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}")
if rope_scaling_factor is None or not isinstance(A , A) or rope_scaling_factor <= 1.0:
raise ValueError(F"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
| 339 | 0 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class a ( UpperCAmelCase ):
_lowercase = (PNDMScheduler,)
_lowercase = (("num_inference_steps", 5_0),)
def _UpperCAmelCase ( self , **A_ ):
'''simple docstring'''
_UpperCAmelCase : Tuple = {
"num_train_timesteps": 1000,
"beta_start": 0.00_01,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**A_ )
return config
def _UpperCAmelCase ( self , A_=0 , **A_ ):
'''simple docstring'''
_UpperCAmelCase : int = dict(self.forward_default_kwargs )
_UpperCAmelCase : Optional[Any] = kwargs.pop("num_inference_steps" , A_ )
_UpperCAmelCase : int = self.dummy_sample
_UpperCAmelCase : Dict = 0.1 * sample
_UpperCAmelCase : Optional[int] = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : Dict = self.get_scheduler_config(**A_ )
_UpperCAmelCase : Any = scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# copy over dummy past residuals
_UpperCAmelCase : Union[str, Any] = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A_ )
_UpperCAmelCase : str = scheduler_class.from_pretrained(A_ )
new_scheduler.set_timesteps(A_ )
# copy over dummy past residuals
_UpperCAmelCase : Union[str, Any] = dummy_past_residuals[:]
_UpperCAmelCase : Tuple = scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
_UpperCAmelCase : Any = new_scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_UpperCAmelCase : Union[str, Any] = scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
_UpperCAmelCase : Optional[Any] = new_scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self ):
'''simple docstring'''
pass
def _UpperCAmelCase ( self , A_=0 , **A_ ):
'''simple docstring'''
_UpperCAmelCase : Any = dict(self.forward_default_kwargs )
_UpperCAmelCase : int = kwargs.pop("num_inference_steps" , A_ )
_UpperCAmelCase : int = self.dummy_sample
_UpperCAmelCase : Any = 0.1 * sample
_UpperCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : Optional[Any] = self.get_scheduler_config()
_UpperCAmelCase : Dict = scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# copy over dummy past residuals (must be after setting timesteps)
_UpperCAmelCase : Any = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(A_ )
_UpperCAmelCase : str = scheduler_class.from_pretrained(A_ )
# copy over dummy past residuals
new_scheduler.set_timesteps(A_ )
# copy over dummy past residual (must be after setting timesteps)
_UpperCAmelCase : List[Any] = dummy_past_residuals[:]
_UpperCAmelCase : Tuple = scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
_UpperCAmelCase : Union[str, Any] = new_scheduler.step_prk(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
_UpperCAmelCase : Optional[Any] = scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
_UpperCAmelCase : List[str] = new_scheduler.step_plms(A_ , A_ , A_ , **A_ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _UpperCAmelCase ( self , **A_ ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.scheduler_classes[0]
_UpperCAmelCase : Union[str, Any] = self.get_scheduler_config(**A_ )
_UpperCAmelCase : List[str] = scheduler_class(**A_ )
_UpperCAmelCase : List[str] = 10
_UpperCAmelCase : Optional[int] = self.dummy_model()
_UpperCAmelCase : List[Any] = self.dummy_sample_deter
scheduler.set_timesteps(A_ )
for i, t in enumerate(scheduler.prk_timesteps ):
_UpperCAmelCase : Dict = model(A_ , A_ )
_UpperCAmelCase : int = scheduler.step_prk(A_ , A_ , A_ ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
_UpperCAmelCase : Any = model(A_ , A_ )
_UpperCAmelCase : Any = scheduler.step_plms(A_ , A_ , A_ ).prev_sample
return sample
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = dict(self.forward_default_kwargs )
_UpperCAmelCase : str = kwargs.pop("num_inference_steps" , A_ )
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : int = self.get_scheduler_config()
_UpperCAmelCase : Optional[Any] = scheduler_class(**A_ )
_UpperCAmelCase : int = self.dummy_sample
_UpperCAmelCase : str = 0.1 * sample
if num_inference_steps is not None and hasattr(A_ , "set_timesteps" ):
scheduler.set_timesteps(A_ )
elif num_inference_steps is not None and not hasattr(A_ , "set_timesteps" ):
_UpperCAmelCase : Union[str, Any] = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
_UpperCAmelCase : Tuple = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
_UpperCAmelCase : Any = dummy_past_residuals[:]
_UpperCAmelCase : Any = scheduler.step_prk(A_ , 0 , A_ , **A_ ).prev_sample
_UpperCAmelCase : Optional[Any] = scheduler.step_prk(A_ , 1 , A_ , **A_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
_UpperCAmelCase : Optional[int] = scheduler.step_plms(A_ , 0 , A_ , **A_ ).prev_sample
_UpperCAmelCase : Optional[Any] = scheduler.step_plms(A_ , 1 , A_ , **A_ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for timesteps in [100, 1000]:
self.check_over_configs(num_train_timesteps=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=A_ )
_UpperCAmelCase : List[str] = self.scheduler_classes[0]
_UpperCAmelCase : Optional[int] = self.get_scheduler_config(steps_offset=1 )
_UpperCAmelCase : str = scheduler_class(**A_ )
scheduler.set_timesteps(10 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1] ) , )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.00_01, 0.0_01] , [0.0_02, 0.02] ):
self.check_over_configs(beta_start=A_ , beta_end=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for t in [1, 5, 10]:
self.check_over_forward(time_step=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 5, 10] , [10, 50, 100] ):
self.check_over_forward(num_inference_steps=A_ )
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : int = 27
for scheduler_class in self.scheduler_classes:
_UpperCAmelCase : str = self.dummy_sample
_UpperCAmelCase : str = 0.1 * sample
_UpperCAmelCase : str = self.get_scheduler_config()
_UpperCAmelCase : Tuple = scheduler_class(**A_ )
scheduler.set_timesteps(A_ )
# before power of 3 fix, would error on first step, so we only need to do two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
_UpperCAmelCase : Dict = scheduler.step_prk(A_ , A_ , A_ ).prev_sample
def _UpperCAmelCase ( self ):
'''simple docstring'''
with self.assertRaises(A_ ):
_UpperCAmelCase : Union[str, Any] = self.scheduler_classes[0]
_UpperCAmelCase : Dict = self.get_scheduler_config()
_UpperCAmelCase : Union[str, Any] = scheduler_class(**A_ )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Dict = self.full_loop()
_UpperCAmelCase : int = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : Optional[Any] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_98.13_18 ) < 1e-2
assert abs(result_mean.item() - 0.25_80 ) < 1e-3
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Optional[Any] = self.full_loop(prediction_type="v_prediction" )
_UpperCAmelCase : Union[str, Any] = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : List[Any] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 67.39_86 ) < 1e-2
assert abs(result_mean.item() - 0.08_78 ) < 1e-3
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : Tuple = self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 )
_UpperCAmelCase : Dict = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : Optional[int] = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 2_30.03_99 ) < 1e-2
assert abs(result_mean.item() - 0.29_95 ) < 1e-3
def _UpperCAmelCase ( self ):
'''simple docstring'''
_UpperCAmelCase : List[Any] = self.full_loop(set_alpha_to_one=A_ , beta_start=0.01 )
_UpperCAmelCase : Optional[Any] = torch.sum(torch.abs(A_ ) )
_UpperCAmelCase : Dict = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1_86.94_82 ) < 1e-2
assert abs(result_mean.item() - 0.24_34 ) < 1e-3
| 189 |
def __SCREAMING_SNAKE_CASE ( lowerCAmelCase: int ) -> int:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise TypeError("Input value must be an 'int' type" )
_UpperCAmelCase : List[Any] = 0
while number:
position += 1
number >>= 1
return position
if __name__ == "__main__":
import doctest
doctest.testmod()
| 189 | 1 |
from __future__ import annotations
def _lowerCAmelCase ( __lowerCAmelCase = 4 ) -> list[list[int]]:
"""simple docstring"""
snake_case__ : Dict = abs(__lowerCAmelCase ) or 4
return [[1 + x + y * row_size for x in range(__lowerCAmelCase )] for y in range(__lowerCAmelCase )]
def _lowerCAmelCase ( __lowerCAmelCase ) -> list[list[int]]:
"""simple docstring"""
return reverse_row(transpose(__lowerCAmelCase ) )
# OR.. transpose(reverse_column(matrix))
def _lowerCAmelCase ( __lowerCAmelCase ) -> list[list[int]]:
"""simple docstring"""
return reverse_row(reverse_column(__lowerCAmelCase ) )
# OR.. reverse_column(reverse_row(matrix))
def _lowerCAmelCase ( __lowerCAmelCase ) -> list[list[int]]:
"""simple docstring"""
return reverse_column(transpose(__lowerCAmelCase ) )
# OR.. transpose(reverse_row(matrix))
def _lowerCAmelCase ( __lowerCAmelCase ) -> list[list[int]]:
"""simple docstring"""
snake_case__ : Tuple = [list(__lowerCAmelCase ) for x in zip(*__lowerCAmelCase )]
return matrix
def _lowerCAmelCase ( __lowerCAmelCase ) -> list[list[int]]:
"""simple docstring"""
snake_case__ : Any = matrix[::-1]
return matrix
def _lowerCAmelCase ( __lowerCAmelCase ) -> list[list[int]]:
"""simple docstring"""
snake_case__ : str = [x[::-1] for x in matrix]
return matrix
def _lowerCAmelCase ( __lowerCAmelCase ) -> None:
"""simple docstring"""
for i in matrix:
print(*__lowerCAmelCase )
if __name__ == "__main__":
A__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 90 counterclockwise:\n''')
print_matrix(rotate_aa(matrix))
A__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 180:\n''')
print_matrix(rotate_aaa(matrix))
A__ = make_matrix()
print('''\norigin:\n''')
print_matrix(matrix)
print('''\nrotate 270 counterclockwise:\n''')
print_matrix(rotate_aaa(matrix))
| 230 |
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class a ( __lowerCamelCase , unittest.TestCase ):
__lowerCAmelCase : int = BertTokenizer
__lowerCAmelCase : int = BertTokenizerFast
__lowerCAmelCase : int = True
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Any = filter_non_english
def __lowerCamelCase ( self :str ):
super().setUp()
snake_case__ : str = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
snake_case__ : str = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file ,'''w''' ,encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def __lowerCamelCase ( self :List[str] ,__lowercase :str ):
snake_case__ : List[Any] = '''UNwant\u00E9d,running'''
snake_case__ : str = '''unwanted, running'''
return input_text, output_text
def __lowerCamelCase ( self :Optional[int] ):
snake_case__ : str = self.tokenizer_class(self.vocab_file )
snake_case__ : str = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(__lowercase ,['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) ,[9, 6, 7, 1_2, 1_0, 1_1] )
def __lowerCamelCase ( self :List[str] ):
if not self.test_rust_tokenizer:
return
snake_case__ : str = self.get_tokenizer()
snake_case__ : Dict = self.get_rust_tokenizer()
snake_case__ : List[Any] = '''UNwant\u00E9d,running'''
snake_case__ : Dict = tokenizer.tokenize(__lowercase )
snake_case__ : int = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
snake_case__ : List[str] = tokenizer.encode(__lowercase ,add_special_tokens=__lowercase )
snake_case__ : List[str] = rust_tokenizer.encode(__lowercase ,add_special_tokens=__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
snake_case__ : Optional[Any] = self.get_rust_tokenizer()
snake_case__ : List[Any] = tokenizer.encode(__lowercase )
snake_case__ : Dict = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
# With lower casing
snake_case__ : List[Any] = self.get_tokenizer(do_lower_case=__lowercase )
snake_case__ : Optional[int] = self.get_rust_tokenizer(do_lower_case=__lowercase )
snake_case__ : int = '''UNwant\u00E9d,running'''
snake_case__ : List[str] = tokenizer.tokenize(__lowercase )
snake_case__ : Union[str, Any] = rust_tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
snake_case__ : int = tokenizer.encode(__lowercase ,add_special_tokens=__lowercase )
snake_case__ : List[str] = rust_tokenizer.encode(__lowercase ,add_special_tokens=__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
snake_case__ : Tuple = self.get_rust_tokenizer()
snake_case__ : int = tokenizer.encode(__lowercase )
snake_case__ : Dict = rust_tokenizer.encode(__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Optional[Any] = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('''ah\u535A\u63A8zz''' ) ,['''ah''', '''\u535A''', '''\u63A8''', '''zz'''] )
def __lowerCamelCase ( self :Any ):
snake_case__ : Dict = BasicTokenizer(do_lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) ,['''hello''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] )
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Optional[int] = BasicTokenizer(do_lower_case=__lowercase ,strip_accents=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hällo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''h\u00E9llo'''] )
def __lowerCamelCase ( self :str ):
snake_case__ : Union[str, Any] = BasicTokenizer(do_lower_case=__lowercase ,strip_accents=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : Union[str, Any] = BasicTokenizer(do_lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''hallo''', '''!''', '''how''', '''are''', '''you''', '''?'''] )
self.assertListEqual(tokenizer.tokenize('''H\u00E9llo''' ) ,['''hello'''] )
def __lowerCamelCase ( self :Optional[Any] ):
snake_case__ : str = BasicTokenizer(do_lower_case=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? ''' ) ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCamelCase ( self :Tuple ):
snake_case__ : Any = BasicTokenizer(do_lower_case=__lowercase ,strip_accents=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''HäLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCamelCase ( self :int ):
snake_case__ : str = BasicTokenizer(do_lower_case=__lowercase ,strip_accents=__lowercase )
self.assertListEqual(
tokenizer.tokenize(''' \tHäLLo!how \n Are yoU? ''' ) ,['''HaLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?'''] )
def __lowerCamelCase ( self :Tuple ):
snake_case__ : List[Any] = BasicTokenizer(do_lower_case=__lowercase ,never_split=['''[UNK]'''] )
self.assertListEqual(
tokenizer.tokenize(''' \tHeLLo!how \n Are yoU? [UNK]''' ) ,['''HeLLo''', '''!''', '''how''', '''Are''', '''yoU''', '''?''', '''[UNK]'''] )
def __lowerCamelCase ( self :Dict ):
snake_case__ : List[str] = BasicTokenizer()
snake_case__ : Any = '''a\n\'ll !!to?\'d of, can\'t.'''
snake_case__ : Optional[int] = ['''a''', '''\'''', '''ll''', '''!''', '''!''', '''to''', '''?''', '''\'''', '''d''', '''of''', ''',''', '''can''', '''\'''', '''t''', '''.''']
self.assertListEqual(tokenizer.tokenize(__lowercase ) ,__lowercase )
def __lowerCamelCase ( self :List[str] ):
snake_case__ : Any = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''']
snake_case__ : Optional[Any] = {}
for i, token in enumerate(__lowercase ):
snake_case__ : Dict = i
snake_case__ : int = WordpieceTokenizer(vocab=__lowercase ,unk_token='''[UNK]''' )
self.assertListEqual(tokenizer.tokenize('''''' ) ,[] )
self.assertListEqual(tokenizer.tokenize('''unwanted running''' ) ,['''un''', '''##want''', '''##ed''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.tokenize('''unwantedX running''' ) ,['''[UNK]''', '''runn''', '''##ing'''] )
def __lowerCamelCase ( self :List[Any] ):
self.assertTrue(_is_whitespace(''' ''' ) )
self.assertTrue(_is_whitespace('''\t''' ) )
self.assertTrue(_is_whitespace('''\r''' ) )
self.assertTrue(_is_whitespace('''\n''' ) )
self.assertTrue(_is_whitespace('''\u00A0''' ) )
self.assertFalse(_is_whitespace('''A''' ) )
self.assertFalse(_is_whitespace('''-''' ) )
def __lowerCamelCase ( self :Union[str, Any] ):
self.assertTrue(_is_control('''\u0005''' ) )
self.assertFalse(_is_control('''A''' ) )
self.assertFalse(_is_control(''' ''' ) )
self.assertFalse(_is_control('''\t''' ) )
self.assertFalse(_is_control('''\r''' ) )
def __lowerCamelCase ( self :str ):
self.assertTrue(_is_punctuation('''-''' ) )
self.assertTrue(_is_punctuation('''$''' ) )
self.assertTrue(_is_punctuation('''`''' ) )
self.assertTrue(_is_punctuation('''.''' ) )
self.assertFalse(_is_punctuation('''A''' ) )
self.assertFalse(_is_punctuation(''' ''' ) )
def __lowerCamelCase ( self :List[str] ):
snake_case__ : str = self.get_tokenizer()
snake_case__ : List[str] = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__lowercase ) for t in ['''Test''', '''\xad''', '''test''']] ,[['''[UNK]'''], [], ['''[UNK]''']] )
self.assertListEqual(
[rust_tokenizer.tokenize(__lowercase ) for t in ['''Test''', '''\xad''', '''test''']] ,[['''[UNK]'''], [], ['''[UNK]''']] )
@slow
def __lowerCamelCase ( self :List[str] ):
snake_case__ : List[str] = self.tokenizer_class.from_pretrained('''bert-base-uncased''' )
snake_case__ : Union[str, Any] = tokenizer.encode('''sequence builders''' ,add_special_tokens=__lowercase )
snake_case__ : str = tokenizer.encode('''multi-sequence build''' ,add_special_tokens=__lowercase )
snake_case__ : Tuple = tokenizer.build_inputs_with_special_tokens(__lowercase )
snake_case__ : int = tokenizer.build_inputs_with_special_tokens(__lowercase ,__lowercase )
assert encoded_sentence == [1_0_1] + text + [1_0_2]
assert encoded_pair == [1_0_1] + text + [1_0_2] + text_a + [1_0_2]
def __lowerCamelCase ( self :List[Any] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case__ : str = self.rust_tokenizer_class.from_pretrained(__lowercase ,**__lowercase )
snake_case__ : List[str] = F"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
snake_case__ : List[Any] = tokenizer_r.encode_plus(
__lowercase ,return_attention_mask=__lowercase ,return_token_type_ids=__lowercase ,return_offsets_mapping=__lowercase ,add_special_tokens=__lowercase ,)
snake_case__ : List[str] = tokenizer_r.do_lower_case if hasattr(__lowercase ,'''do_lower_case''' ) else False
snake_case__ : Optional[int] = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), '''A'''),
((1, 2), ''','''),
((3, 5), '''na'''),
((5, 6), '''##ï'''),
((6, 8), '''##ve'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''Allen'''),
((2_1, 2_3), '''##NL'''),
((2_3, 2_4), '''##P'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), '''a'''),
((1, 2), ''','''),
((3, 8), '''naive'''),
((9, 1_5), tokenizer_r.mask_token),
((1_6, 2_1), '''allen'''),
((2_1, 2_3), '''##nl'''),
((2_3, 2_4), '''##p'''),
((2_5, 3_3), '''sentence'''),
((3_3, 3_4), '''.'''),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['''input_ids'''] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens['''offset_mapping'''] )
def __lowerCamelCase ( self :List[str] ):
snake_case__ : str = ['''的''', '''人''', '''有''']
snake_case__ : Optional[int] = ''''''.join(__lowercase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
snake_case__ : Optional[Any] = True
snake_case__ : int = self.tokenizer_class.from_pretrained(__lowercase ,**__lowercase )
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(__lowercase ,**__lowercase )
snake_case__ : Union[str, Any] = tokenizer_p.encode(__lowercase ,add_special_tokens=__lowercase )
snake_case__ : List[str] = tokenizer_r.encode(__lowercase ,add_special_tokens=__lowercase )
snake_case__ : List[str] = tokenizer_r.convert_ids_to_tokens(__lowercase )
snake_case__ : Optional[Any] = tokenizer_p.convert_ids_to_tokens(__lowercase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__lowercase ,__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
snake_case__ : str = False
snake_case__ : Dict = self.rust_tokenizer_class.from_pretrained(__lowercase ,**__lowercase )
snake_case__ : int = self.tokenizer_class.from_pretrained(__lowercase ,**__lowercase )
snake_case__ : int = tokenizer_r.encode(__lowercase ,add_special_tokens=__lowercase )
snake_case__ : Tuple = tokenizer_p.encode(__lowercase ,add_special_tokens=__lowercase )
snake_case__ : List[Any] = tokenizer_r.convert_ids_to_tokens(__lowercase )
snake_case__ : Union[str, Any] = tokenizer_p.convert_ids_to_tokens(__lowercase )
# it is expected that only the first Chinese character is not preceded by "##".
snake_case__ : Optional[int] = [
F"""##{token}""" if idx != 0 else token for idx, token in enumerate(__lowercase )
]
self.assertListEqual(__lowercase ,__lowercase )
self.assertListEqual(__lowercase ,__lowercase )
| 230 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_SCREAMING_SNAKE_CASE : Any = logging.get_logger(__name__)
_SCREAMING_SNAKE_CASE : Union[str, Any] = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __a ( snake_case__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 'trocr'
SCREAMING_SNAKE_CASE_ = ['past_key_values']
SCREAMING_SNAKE_CASE_ = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self : List[Any] , lowercase_ : Dict=5_0265 , lowercase_ : List[Any]=1024 , lowercase_ : int=12 , lowercase_ : Optional[int]=16 , lowercase_ : List[Any]=4096 , lowercase_ : Tuple="gelu" , lowercase_ : List[str]=512 , lowercase_ : str=0.1 , lowercase_ : Any=0.0 , lowercase_ : Any=0.0 , lowercase_ : str=2 , lowercase_ : List[Any]=0.0_2 , lowercase_ : Optional[Any]=0.0 , lowercase_ : Optional[int]=True , lowercase_ : str=False , lowercase_ : int=True , lowercase_ : Any=True , lowercase_ : Any=1 , lowercase_ : Optional[int]=0 , lowercase_ : Dict=2 , **lowercase_ : List[str] , ):
UpperCamelCase__ : Optional[Any] =vocab_size
UpperCamelCase__ : Union[str, Any] =d_model
UpperCamelCase__ : List[str] =decoder_layers
UpperCamelCase__ : List[str] =decoder_attention_heads
UpperCamelCase__ : List[Any] =decoder_ffn_dim
UpperCamelCase__ : Optional[int] =activation_function
UpperCamelCase__ : Optional[int] =max_position_embeddings
UpperCamelCase__ : Optional[int] =dropout
UpperCamelCase__ : Tuple =attention_dropout
UpperCamelCase__ : Any =activation_dropout
UpperCamelCase__ : List[Any] =init_std
UpperCamelCase__ : List[str] =decoder_layerdrop
UpperCamelCase__ : Optional[Any] =use_cache
UpperCamelCase__ : str =scale_embedding
UpperCamelCase__ : Optional[Any] =use_learned_position_embeddings
UpperCamelCase__ : Union[str, Any] =layernorm_embedding
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , decoder_start_token_id=lowercase_ , **lowercase_ , )
| 351 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class __a :
"""simple docstring"""
def __init__( self : int , lowercase_ : Optional[Any] , lowercase_ : Union[str, Any]=99 , lowercase_ : Optional[Any]=13 , lowercase_ : Tuple=7 , lowercase_ : Any=9 , lowercase_ : Dict=True , lowercase_ : str=True , lowercase_ : Optional[int]=False , lowercase_ : str=32 , lowercase_ : Tuple=5 , lowercase_ : Union[str, Any]=4 , lowercase_ : Tuple=37 , lowercase_ : int=8 , lowercase_ : str=0.1 , lowercase_ : Optional[Any]=0.0_0_2 , lowercase_ : Any=1 , lowercase_ : Tuple=0 , lowercase_ : Any=0 , lowercase_ : Optional[Any]=None , lowercase_ : str=None , ):
UpperCamelCase__ : Optional[int] =parent
UpperCamelCase__ : int =batch_size
UpperCamelCase__ : Tuple =encoder_seq_length
UpperCamelCase__ : List[Any] =decoder_seq_length
# For common tests
UpperCamelCase__ : str =self.decoder_seq_length
UpperCamelCase__ : List[Any] =is_training
UpperCamelCase__ : Optional[int] =use_attention_mask
UpperCamelCase__ : Union[str, Any] =use_labels
UpperCamelCase__ : List[str] =vocab_size
UpperCamelCase__ : Union[str, Any] =hidden_size
UpperCamelCase__ : Any =num_hidden_layers
UpperCamelCase__ : Optional[int] =num_attention_heads
UpperCamelCase__ : str =d_ff
UpperCamelCase__ : Union[str, Any] =relative_attention_num_buckets
UpperCamelCase__ : Dict =dropout_rate
UpperCamelCase__ : Dict =initializer_factor
UpperCamelCase__ : str =eos_token_id
UpperCamelCase__ : List[str] =pad_token_id
UpperCamelCase__ : List[str] =decoder_start_token_id
UpperCamelCase__ : Optional[Any] =None
UpperCamelCase__ : int =decoder_layers
def _lowerCAmelCase ( self : List[str] ):
return TaConfig.from_pretrained('''google/umt5-base''' )
def _lowerCAmelCase ( self : Optional[int] , lowercase_ : List[Any] , lowercase_ : List[Any] , lowercase_ : Union[str, Any] , lowercase_ : List[Any]=None , lowercase_ : Optional[int]=None , lowercase_ : Union[str, Any]=None , lowercase_ : Tuple=None , lowercase_ : Any=None , ):
if attention_mask is None:
UpperCamelCase__ : List[str] =input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
UpperCamelCase__ : Union[str, Any] =decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
UpperCamelCase__ : List[Any] =torch.ones(config.num_hidden_layers , config.num_attention_heads , device=lowercase_ )
if decoder_head_mask is None:
UpperCamelCase__ : List[Any] =torch.ones(config.num_decoder_layers , config.num_attention_heads , device=lowercase_ )
if cross_attn_head_mask is None:
UpperCamelCase__ : Any =torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=lowercase_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _lowerCAmelCase ( self : List[str] ):
UpperCamelCase__ : Dict =ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
UpperCamelCase__ : Any =ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
UpperCamelCase__ : Tuple =input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase__ : Tuple =decoder_input_ids.clamp(self.pad_token_id + 1 )
UpperCamelCase__ : List[str] =self.get_config()
UpperCamelCase__ : int =config.num_attention_heads
UpperCamelCase__ : List[Any] =self.prepare_inputs_dict(lowercase_ , lowercase_ , lowercase_ )
return config, input_dict
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ , UpperCamelCase__ : Any =self.prepare_config_and_inputs()
return config, inputs_dict
def _lowerCAmelCase ( self : Optional[int] ):
return TaConfig(
vocab_size=166 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _lowerCAmelCase ( self : Any ):
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _lowerCAmelCase ( self : int , lowercase_ : str , lowercase_ : List[str] , lowercase_ : List[str] , lowercase_ : Optional[int] , lowercase_ : str , lowercase_ : Dict , ):
UpperCamelCase__ : int =UMTaModel(config=lowercase_ )
model.to(lowercase_ )
model.eval()
UpperCamelCase__ : str =model(
input_ids=lowercase_ , decoder_input_ids=lowercase_ , attention_mask=lowercase_ , decoder_attention_mask=lowercase_ , )
UpperCamelCase__ : Union[str, Any] =model(input_ids=lowercase_ , decoder_input_ids=lowercase_ )
UpperCamelCase__ : List[str] =result.last_hidden_state
UpperCamelCase__ : str =result.past_key_values
UpperCamelCase__ : Any =result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(lowercase_ ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _lowerCAmelCase ( self : Dict , lowercase_ : Tuple , lowercase_ : Tuple , lowercase_ : Dict , lowercase_ : List[Any] , lowercase_ : Dict , lowercase_ : List[Any] , ):
UpperCamelCase__ : Any =UMTaModel(config=lowercase_ ).get_decoder().to(lowercase_ ).eval()
# first forward pass
UpperCamelCase__ : List[Any] =model(lowercase_ , use_cache=lowercase_ )
UpperCamelCase__ : Optional[Any] =model(lowercase_ )
UpperCamelCase__ : Dict =model(lowercase_ , use_cache=lowercase_ )
self.parent.assertTrue(len(lowercase_ ) == len(lowercase_ ) )
self.parent.assertTrue(len(lowercase_ ) == len(lowercase_ ) + 1 )
UpperCamelCase__ , UpperCamelCase__ : str =outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCamelCase__ : List[Any] =ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
UpperCamelCase__ : Union[str, Any] =torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCamelCase__ : Optional[int] =model(lowercase_ )['''last_hidden_state''']
UpperCamelCase__ : Dict =model(lowercase_ , past_key_values=lowercase_ )['''last_hidden_state''']
# select random slice
UpperCamelCase__ : List[str] =ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCamelCase__ : Any =output_from_no_past[:, -1, random_slice_idx].detach()
UpperCamelCase__ : Dict =output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowercase_ , lowercase_ , atol=1e-3 ) )
def _lowerCAmelCase ( self : List[Any] , lowercase_ : Tuple , lowercase_ : Tuple , ):
UpperCamelCase__ : Tuple =UMTaModel(config=lowercase_ ).to(lowercase_ ).half().eval()
UpperCamelCase__ : Any =model(**lowercase_ )['''last_hidden_state''']
self.parent.assertFalse(torch.isnan(lowercase_ ).any().item() )
@require_torch
class __a ( snake_case__, snake_case__, snake_case__, unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE_ = (UMTaForConditionalGeneration,) if is_torch_available() else ()
SCREAMING_SNAKE_CASE_ = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = False
SCREAMING_SNAKE_CASE_ = True
SCREAMING_SNAKE_CASE_ = True
# The small UMT5 model needs higher percentages for CPU/MP tests
SCREAMING_SNAKE_CASE_ = [0.8, 0.9]
def _lowerCAmelCase ( self : Union[str, Any] ):
UpperCamelCase__ : Union[str, Any] =UMTaModelTester(self )
@unittest.skip('''Test has a segmentation fault on torch 1.8.0''' )
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ : Optional[int] =UMTaModel(config_and_inputs[0] ).to(lowercase_ )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
lowercase_ , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=lowercase_ , opset_version=9 , input_names=['''input_ids''', '''decoder_input_ids'''] , )
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''' )
def _lowerCAmelCase ( self : Optional[Any] ):
UpperCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*lowercase_ )
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : Dict =['''encoder_attentions''', '''decoder_attentions''', '''cross_attentions''']
UpperCamelCase__ : List[str] =self.model_tester.prepare_config_and_inputs()
UpperCamelCase__ : str =config_and_inputs[0]
UpperCamelCase__ : Tuple =UMTaForConditionalGeneration(lowercase_ ).eval()
model.to(lowercase_ )
UpperCamelCase__ : Dict ={
'''head_mask''': torch.zeros(config.num_layers , config.num_heads , device=lowercase_ ),
'''decoder_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=lowercase_ ),
'''cross_attn_head_mask''': torch.zeros(config.num_decoder_layers , config.num_heads , device=lowercase_ ),
}
for attn_name, (name, mask) in zip(lowercase_ , head_masking.items() ):
UpperCamelCase__ : Optional[int] ={name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
UpperCamelCase__ : Tuple =torch.ones(
config.num_decoder_layers , config.num_heads , device=lowercase_ )
UpperCamelCase__ : str =model.generate(
config_and_inputs[1]['''input_ids'''] , num_beams=1 , max_length=3 , output_attentions=lowercase_ , return_dict_in_generate=lowercase_ , **lowercase_ , )
# We check the state of decoder_attentions and cross_attentions just from the last step
UpperCamelCase__ : Union[str, Any] =out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip('''Does not work on the tiny model as we keep hitting edge cases.''' )
def _lowerCAmelCase ( self : Any ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class __a ( unittest.TestCase ):
"""simple docstring"""
@slow
@unittest.skip(
'''Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged''' )
def _lowerCAmelCase ( self : List[Any] ):
UpperCamelCase__ : Optional[int] =UMTaForConditionalGeneration.from_pretrained('''google/umt5-small''' , return_dict=lowercase_ ).to(lowercase_ )
UpperCamelCase__ : Any =AutoTokenizer.from_pretrained('''google/umt5-small''' , use_fast=lowercase_ , legacy=lowercase_ )
UpperCamelCase__ : int =[
'''Bonjour monsieur <extra_id_0> bien <extra_id_1>.''',
'''No se como puedo <extra_id_0>.''',
'''This is the reason why we <extra_id_0> them.''',
'''The <extra_id_0> walks in <extra_id_1>, seats''',
'''A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.''',
]
UpperCamelCase__ : Optional[int] =tokenizer(lowercase_ , return_tensors='''pt''' , padding=lowercase_ ).input_ids
# fmt: off
UpperCamelCase__ : int =torch.tensor(
[
[ 3_8530, 21_0703, 25_6299, 1410, 25_6298, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 826, 321, 671, 2_5922, 25_6299, 274, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 1460, 339, 312, 1_9014, 1_0620, 758, 25_6299, 2355,274, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 517, 25_6299, 1_4869, 281, 301, 25_6298, 275, 11_9983,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 320, 25_6299, 1_4869, 281, 2234, 289, 2275, 333,6_1391, 289, 25_6298, 543, 25_6297, 16_8714, 329, 25_6296,274, 1],
] )
# fmt: on
torch.testing.assert_allclose(lowercase_ , lowercase_ )
UpperCamelCase__ : Optional[int] =model.generate(input_ids.to(lowercase_ ) )
UpperCamelCase__ : int =[
'''<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>''',
'''<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
'''<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>''',
]
UpperCamelCase__ : Optional[Any] =tokenizer.batch_decode(lowercase_ )
self.assertEqual(lowercase_ , lowercase_ )
| 157 | 0 |
import argparse
import torch
from torch import nn
from transformers import SpeechaTextConfig, SpeechaTextForConditionalGeneration
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: int ) -> Optional[int]:
'''simple docstring'''
A__ = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"decoder.output_projection.weight",
"_float_tensor",
"encoder.embed_positions._float_tensor",
"decoder.embed_positions._float_tensor",
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str ) -> Union[str, Any]:
'''simple docstring'''
A__ = list(s_dict.keys() )
for key in keys:
if "transformer_layers" in key:
A__ = s_dict.pop(SCREAMING_SNAKE_CASE_ )
elif "subsample" in key:
A__ = s_dict.pop(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: Tuple ) -> Union[str, Any]:
'''simple docstring'''
A__ , A__ = emb.weight.shape
A__ = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
A__ = emb.weight.data
return lin_layer
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_: str , SCREAMING_SNAKE_CASE_: List[str] ) -> str:
'''simple docstring'''
A__ = torch.load(SCREAMING_SNAKE_CASE_ , map_location="cpu" )
A__ = mam_aaa["args"]
A__ = mam_aaa["model"]
A__ = state_dict["decoder.output_projection.weight"]
remove_ignore_keys_(SCREAMING_SNAKE_CASE_ )
rename_keys(SCREAMING_SNAKE_CASE_ )
A__ = state_dict["decoder.embed_tokens.weight"].shape[0]
A__ = args.share_decoder_input_output_embed
A__ = [int(SCREAMING_SNAKE_CASE_ ) for i in args.conv_kernel_sizes.split("," )]
A__ = SpeechaTextConfig(
vocab_size=SCREAMING_SNAKE_CASE_ , max_source_positions=args.max_source_positions , max_target_positions=args.max_target_positions , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function="relu" , num_conv_layers=len(SCREAMING_SNAKE_CASE_ ) , conv_channels=args.conv_channels , conv_kernel_sizes=SCREAMING_SNAKE_CASE_ , input_feat_per_channel=args.input_feat_per_channel , input_channels=args.input_channels , tie_word_embeddings=SCREAMING_SNAKE_CASE_ , num_beams=5 , max_length=2_0_0 , use_cache=SCREAMING_SNAKE_CASE_ , decoder_start_token_id=2 , early_stopping=SCREAMING_SNAKE_CASE_ , )
A__ = SpeechaTextForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
A__ , A__ = model.model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0 and not set(SCREAMING_SNAKE_CASE_ ) <= {
"encoder.embed_positions.weights",
"decoder.embed_positions.weights",
}:
raise ValueError(
"Only `encoder.embed_positions.weights` and `decoder.embed_positions.weights` are allowed to be missing,"
F' but all the following weights are missing {missing}' )
if tie_embeds:
A__ = make_linear_from_emb(model.model.decoder.embed_tokens )
else:
A__ = lm_head_weights
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCAmelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--fairseq_path""", type=str, help="""Path to the fairseq model (.pt) file.""")
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowerCAmelCase__ = parser.parse_args()
convert_fairseq_sat_checkpoint_to_tfms(args.fairseq_path, args.pytorch_dump_folder_path)
| 68 |
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
lowerCAmelCase__ = logging.get_logger(__name__)
class a__ ( snake_case ):
"""simple docstring"""
def __init__( self , lowercase , lowercase , lowercase , **lowercase ) -> Union[str, Any]:
'''simple docstring'''
A__ = feature_size
A__ = sampling_rate
A__ = padding_value
A__ = kwargs.pop("padding_side" , "right" )
A__ = kwargs.pop("return_attention_mask" , lowercase )
super().__init__(**lowercase )
def UpperCamelCase ( self , lowercase , lowercase = True , lowercase = None , lowercase = False , lowercase = None , lowercase = None , lowercase = None , ) -> BatchFeature:
'''simple docstring'''
if isinstance(lowercase , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
A__ = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
"You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`"
F' to this method that includes {self.model_input_names[0]}, but you provided'
F' {list(processed_features.keys() )}' )
A__ = processed_features[self.model_input_names[0]]
A__ = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(lowercase ) == 0:
if return_attention_mask:
A__ = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
A__ = required_input[0]
if isinstance(lowercase , (list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
A__ = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(lowercase ):
A__ = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(lowercase ):
A__ = "tf"
elif is_torch_tensor(lowercase ):
A__ = "pt"
elif isinstance(lowercase , (int, float, list, tuple, np.ndarray) ):
A__ = "np"
else:
raise ValueError(
F'type of {first_element} unknown: {type(lowercase )}. '
"Should be one of a python, numpy, pytorch or tensorflow object." )
for key, value in processed_features.items():
if isinstance(value[0] , (int, float) ):
A__ = to_numpy(lowercase )
else:
A__ = [to_numpy(lowercase ) for v in value]
# Convert padding_strategy in PaddingStrategy
A__ = self._get_padding_strategies(padding=lowercase , max_length=lowercase )
A__ = processed_features[self.model_input_names[0]]
A__ = len(lowercase )
if not all(len(lowercase ) == batch_size for v in processed_features.values() ):
raise ValueError("Some items in the output dictionary have a different batch size than others." )
A__ = []
for i in range(lowercase ):
A__ = {k: v[i] for k, v in processed_features.items()}
# truncation
A__ = self._truncate(
lowercase , max_length=lowercase , pad_to_multiple_of=lowercase , truncation=lowercase , )
truncated_inputs.append(lowercase )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
A__ = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
A__ = PaddingStrategy.MAX_LENGTH
A__ = {}
for i in range(lowercase ):
# padding
A__ = self._pad(
truncated_inputs[i] , max_length=lowercase , padding_strategy=lowercase , pad_to_multiple_of=lowercase , return_attention_mask=lowercase , )
for key, value in outputs.items():
if key not in batch_outputs:
A__ = []
if value.dtype is np.dtype(np.floataa ):
A__ = value.astype(np.floataa )
batch_outputs[key].append(lowercase )
return BatchFeature(lowercase , tensor_type=lowercase )
def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = PaddingStrategy.DO_NOT_PAD , lowercase = None , lowercase = None , ) -> dict:
'''simple docstring'''
A__ = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
A__ = len(lowercase )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A__ = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(lowercase ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
A__ = np.ones(len(lowercase ) , dtype=np.intaa )
if needs_to_be_padded:
A__ = max_length - len(lowercase )
if self.padding_side == "right":
if return_attention_mask:
A__ = np.pad(
processed_features["attention_mask"] , (0, difference) )
A__ = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
A__ = np.pad(
lowercase , lowercase , "constant" , constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
A__ = np.pad(
processed_features["attention_mask"] , (difference, 0) )
A__ = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
A__ = np.pad(
lowercase , lowercase , "constant" , constant_values=self.padding_value )
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return processed_features
def UpperCamelCase ( self , lowercase , lowercase = None , lowercase = None , lowercase = None , ) -> Union[str, Any]:
'''simple docstring'''
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined." )
A__ = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A__ = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A__ = len(lowercase ) > max_length
if needs_to_be_truncated:
A__ = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
A__ = processed_features["attention_mask"][:max_length]
return processed_features
def UpperCamelCase ( self , lowercase=False , lowercase=None ) -> Any:
'''simple docstring'''
if padding is not False:
if padding is True:
A__ = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(lowercase , lowercase ):
A__ = PaddingStrategy(lowercase )
elif isinstance(lowercase , lowercase ):
A__ = padding
else:
A__ = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
"Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
" as `padding_value`. For example: `feature_extractor.padding_value = 0.0`." )
return padding_strategy
| 68 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {'''vocab_file''': '''vocab.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase_ = {
'''vocab_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'''
),
},
}
lowerCAmelCase_ = {
'''yjernite/retribert-base-uncased''': 5_1_2,
}
lowerCAmelCase_ = {
'''yjernite/retribert-base-uncased''': {'''do_lower_case''': True},
}
class __lowerCAmelCase ( _a ):
lowerCamelCase_ : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase_ : List[Any] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase_ : Tuple = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase_ : str = RetriBertTokenizer
lowerCamelCase_ : Dict = ['''input_ids''', '''attention_mask''']
def __init__(self , __magic_name__=None , __magic_name__=None , __magic_name__=True , __magic_name__="[UNK]" , __magic_name__="[SEP]" , __magic_name__="[PAD]" , __magic_name__="[CLS]" , __magic_name__="[MASK]" , __magic_name__=True , __magic_name__=None , **__magic_name__ , ) -> Dict:
'''simple docstring'''
super().__init__(
__magic_name__ , tokenizer_file=__magic_name__ , do_lower_case=__magic_name__ , unk_token=__magic_name__ , sep_token=__magic_name__ , pad_token=__magic_name__ , cls_token=__magic_name__ , mask_token=__magic_name__ , tokenize_chinese_chars=__magic_name__ , strip_accents=__magic_name__ , **__magic_name__ , )
snake_case_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , __magic_name__ ) != do_lower_case
or normalizer_state.get('''strip_accents''' , __magic_name__ ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , __magic_name__ ) != tokenize_chinese_chars
):
snake_case_ : Optional[int] = getattr(__magic_name__ , normalizer_state.pop('''type''' ) )
snake_case_ : str = do_lower_case
snake_case_ : List[Any] = strip_accents
snake_case_ : int = tokenize_chinese_chars
snake_case_ : List[Any] = normalizer_class(**__magic_name__ )
snake_case_ : int = do_lower_case
def lowerCamelCase (self , __magic_name__ , __magic_name__=None ) -> Optional[int]:
'''simple docstring'''
snake_case_ : List[str] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> List[int]:
'''simple docstring'''
snake_case_ : List[str] = [self.sep_token_id]
snake_case_ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase (self , __magic_name__ , __magic_name__ = None ) -> Tuple[str]:
'''simple docstring'''
snake_case_ : Union[str, Any] = self._tokenizer.model.save(__magic_name__ , name=__magic_name__ )
return tuple(__magic_name__ )
| 279 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def lowerCamelCase_ ( _UpperCamelCase ) -> tuple:
"""simple docstring"""
return (data["data"], data["target"])
def lowerCamelCase_ ( _UpperCamelCase , _UpperCamelCase ) -> XGBClassifier:
"""simple docstring"""
snake_case_ : Optional[Any] = XGBClassifier()
classifier.fit(_UpperCamelCase , _UpperCamelCase )
return classifier
def lowerCamelCase_ ( ) -> None:
"""simple docstring"""
snake_case_ : Optional[Any] = load_iris()
snake_case_ , snake_case_ : str = data_handling(_UpperCamelCase )
snake_case_ , snake_case_ , snake_case_ , snake_case_ : Dict = train_test_split(
_UpperCamelCase , _UpperCamelCase , test_size=0.25 )
snake_case_ : List[str] = iris['''target_names''']
# Create an XGBoost Classifier from the training data
snake_case_ : int = xgboost(_UpperCamelCase , _UpperCamelCase )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , display_labels=_UpperCamelCase , cmap='''Blues''' , normalize='''true''' , )
plt.title('''Normalized Confusion Matrix - IRIS Dataset''' )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 279 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
a_ = {
'Acehnese Arabic': 'ace_Arab',
'Acehnese Latin': 'ace_Latn',
'Mesopotamian Arabic': 'acm_Arab',
'Ta\'izzi-Adeni Arabic': 'acq_Arab',
'Tunisian Arabic': 'aeb_Arab',
'Afrikaans': 'afr_Latn',
'South Levantine Arabic': 'ajp_Arab',
'Akan': 'aka_Latn',
'Amharic': 'amh_Ethi',
'North Levantine Arabic': 'apc_Arab',
'Modern Standard Arabic': 'arb_Arab',
'Modern Standard Arabic Romanized': 'arb_Latn',
'Najdi Arabic': 'ars_Arab',
'Moroccan Arabic': 'ary_Arab',
'Egyptian Arabic': 'arz_Arab',
'Assamese': 'asm_Beng',
'Asturian': 'ast_Latn',
'Awadhi': 'awa_Deva',
'Central Aymara': 'ayr_Latn',
'South Azerbaijani': 'azb_Arab',
'North Azerbaijani': 'azj_Latn',
'Bashkir': 'bak_Cyrl',
'Bambara': 'bam_Latn',
'Balinese': 'ban_Latn',
'Belarusian': 'bel_Cyrl',
'Bemba': 'bem_Latn',
'Bengali': 'ben_Beng',
'Bhojpuri': 'bho_Deva',
'Banjar Arabic': 'bjn_Arab',
'Banjar Latin': 'bjn_Latn',
'Standard Tibetan': 'bod_Tibt',
'Bosnian': 'bos_Latn',
'Buginese': 'bug_Latn',
'Bulgarian': 'bul_Cyrl',
'Catalan': 'cat_Latn',
'Cebuano': 'ceb_Latn',
'Czech': 'ces_Latn',
'Chokwe': 'cjk_Latn',
'Central Kurdish': 'ckb_Arab',
'Crimean Tatar': 'crh_Latn',
'Welsh': 'cym_Latn',
'Danish': 'dan_Latn',
'German': 'deu_Latn',
'Southwestern Dinka': 'dik_Latn',
'Dyula': 'dyu_Latn',
'Dzongkha': 'dzo_Tibt',
'Greek': 'ell_Grek',
'English': 'eng_Latn',
'Esperanto': 'epo_Latn',
'Estonian': 'est_Latn',
'Basque': 'eus_Latn',
'Ewe': 'ewe_Latn',
'Faroese': 'fao_Latn',
'Fijian': 'fij_Latn',
'Finnish': 'fin_Latn',
'Fon': 'fon_Latn',
'French': 'fra_Latn',
'Friulian': 'fur_Latn',
'Nigerian Fulfulde': 'fuv_Latn',
'Scottish Gaelic': 'gla_Latn',
'Irish': 'gle_Latn',
'Galician': 'glg_Latn',
'Guarani': 'grn_Latn',
'Gujarati': 'guj_Gujr',
'Haitian Creole': 'hat_Latn',
'Hausa': 'hau_Latn',
'Hebrew': 'heb_Hebr',
'Hindi': 'hin_Deva',
'Chhattisgarhi': 'hne_Deva',
'Croatian': 'hrv_Latn',
'Hungarian': 'hun_Latn',
'Armenian': 'hye_Armn',
'Igbo': 'ibo_Latn',
'Ilocano': 'ilo_Latn',
'Indonesian': 'ind_Latn',
'Icelandic': 'isl_Latn',
'Italian': 'ita_Latn',
'Javanese': 'jav_Latn',
'Japanese': 'jpn_Jpan',
'Kabyle': 'kab_Latn',
'Jingpho': 'kac_Latn',
'Kamba': 'kam_Latn',
'Kannada': 'kan_Knda',
'Kashmiri Arabic': 'kas_Arab',
'Kashmiri Devanagari': 'kas_Deva',
'Georgian': 'kat_Geor',
'Central Kanuri Arabic': 'knc_Arab',
'Central Kanuri Latin': 'knc_Latn',
'Kazakh': 'kaz_Cyrl',
'Kabiyè': 'kbp_Latn',
'Kabuverdianu': 'kea_Latn',
'Khmer': 'khm_Khmr',
'Kikuyu': 'kik_Latn',
'Kinyarwanda': 'kin_Latn',
'Kyrgyz': 'kir_Cyrl',
'Kimbundu': 'kmb_Latn',
'Northern Kurdish': 'kmr_Latn',
'Kikongo': 'kon_Latn',
'Korean': 'kor_Hang',
'Lao': 'lao_Laoo',
'Ligurian': 'lij_Latn',
'Limburgish': 'lim_Latn',
'Lingala': 'lin_Latn',
'Lithuanian': 'lit_Latn',
'Lombard': 'lmo_Latn',
'Latgalian': 'ltg_Latn',
'Luxembourgish': 'ltz_Latn',
'Luba-Kasai': 'lua_Latn',
'Ganda': 'lug_Latn',
'Luo': 'luo_Latn',
'Mizo': 'lus_Latn',
'Standard Latvian': 'lvs_Latn',
'Magahi': 'mag_Deva',
'Maithili': 'mai_Deva',
'Malayalam': 'mal_Mlym',
'Marathi': 'mar_Deva',
'Minangkabau Arabic ': 'min_Arab',
'Minangkabau Latin': 'min_Latn',
'Macedonian': 'mkd_Cyrl',
'Plateau Malagasy': 'plt_Latn',
'Maltese': 'mlt_Latn',
'Meitei Bengali': 'mni_Beng',
'Halh Mongolian': 'khk_Cyrl',
'Mossi': 'mos_Latn',
'Maori': 'mri_Latn',
'Burmese': 'mya_Mymr',
'Dutch': 'nld_Latn',
'Norwegian Nynorsk': 'nno_Latn',
'Norwegian Bokmål': 'nob_Latn',
'Nepali': 'npi_Deva',
'Northern Sotho': 'nso_Latn',
'Nuer': 'nus_Latn',
'Nyanja': 'nya_Latn',
'Occitan': 'oci_Latn',
'West Central Oromo': 'gaz_Latn',
'Odia': 'ory_Orya',
'Pangasinan': 'pag_Latn',
'Eastern Panjabi': 'pan_Guru',
'Papiamento': 'pap_Latn',
'Western Persian': 'pes_Arab',
'Polish': 'pol_Latn',
'Portuguese': 'por_Latn',
'Dari': 'prs_Arab',
'Southern Pashto': 'pbt_Arab',
'Ayacucho Quechua': 'quy_Latn',
'Romanian': 'ron_Latn',
'Rundi': 'run_Latn',
'Russian': 'rus_Cyrl',
'Sango': 'sag_Latn',
'Sanskrit': 'san_Deva',
'Santali': 'sat_Olck',
'Sicilian': 'scn_Latn',
'Shan': 'shn_Mymr',
'Sinhala': 'sin_Sinh',
'Slovak': 'slk_Latn',
'Slovenian': 'slv_Latn',
'Samoan': 'smo_Latn',
'Shona': 'sna_Latn',
'Sindhi': 'snd_Arab',
'Somali': 'som_Latn',
'Southern Sotho': 'sot_Latn',
'Spanish': 'spa_Latn',
'Tosk Albanian': 'als_Latn',
'Sardinian': 'srd_Latn',
'Serbian': 'srp_Cyrl',
'Swati': 'ssw_Latn',
'Sundanese': 'sun_Latn',
'Swedish': 'swe_Latn',
'Swahili': 'swh_Latn',
'Silesian': 'szl_Latn',
'Tamil': 'tam_Taml',
'Tatar': 'tat_Cyrl',
'Telugu': 'tel_Telu',
'Tajik': 'tgk_Cyrl',
'Tagalog': 'tgl_Latn',
'Thai': 'tha_Thai',
'Tigrinya': 'tir_Ethi',
'Tamasheq Latin': 'taq_Latn',
'Tamasheq Tifinagh': 'taq_Tfng',
'Tok Pisin': 'tpi_Latn',
'Tswana': 'tsn_Latn',
'Tsonga': 'tso_Latn',
'Turkmen': 'tuk_Latn',
'Tumbuka': 'tum_Latn',
'Turkish': 'tur_Latn',
'Twi': 'twi_Latn',
'Central Atlas Tamazight': 'tzm_Tfng',
'Uyghur': 'uig_Arab',
'Ukrainian': 'ukr_Cyrl',
'Umbundu': 'umb_Latn',
'Urdu': 'urd_Arab',
'Northern Uzbek': 'uzn_Latn',
'Venetian': 'vec_Latn',
'Vietnamese': 'vie_Latn',
'Waray': 'war_Latn',
'Wolof': 'wol_Latn',
'Xhosa': 'xho_Latn',
'Eastern Yiddish': 'ydd_Hebr',
'Yoruba': 'yor_Latn',
'Yue Chinese': 'yue_Hant',
'Chinese Simplified': 'zho_Hans',
'Chinese Traditional': 'zho_Hant',
'Standard Malay': 'zsm_Latn',
'Zulu': 'zul_Latn',
}
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = """facebook/nllb-200-distilled-600M"""
snake_case_ = (
"""This is a tool that translates text from a language to another. It takes three inputs: `text`, which should """
"""be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, """
"""which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in """
"""plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."""
)
snake_case_ = """translator"""
snake_case_ = AutoTokenizer
snake_case_ = AutoModelForSeqaSeqLM
snake_case_ = LANGUAGE_CODES
snake_case_ = ["""text""", """text""", """text"""]
snake_case_ = ["""text"""]
def __magic_name__ ( self : Dict , __lowercase : str , __lowercase : int , __lowercase : Union[str, Any] ) -> Optional[Any]:
if src_lang not in self.lang_to_code:
raise ValueError(F"{src_lang} is not a supported language." )
if tgt_lang not in self.lang_to_code:
raise ValueError(F"{tgt_lang} is not a supported language." )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.lang_to_code[src_lang]
SCREAMING_SNAKE_CASE__ : int =self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
__lowercase , return_tensors='''pt''' , src_lang=__lowercase , tgt_lang=__lowercase )
def __magic_name__ ( self : Dict , __lowercase : Optional[int] ) -> List[Any]:
return self.model.generate(**__lowercase )
def __magic_name__ ( self : List[Any] , __lowercase : Dict ) -> Optional[Any]:
return self.post_processor.decode(outputs[0].tolist() , skip_special_tokens=__lowercase ) | 152 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class __SCREAMING_SNAKE_CASE ( lowerCamelCase ):
snake_case_ = ["""image_processor""", """tokenizer"""]
snake_case_ = """CLIPImageProcessor"""
snake_case_ = ("""XLMRobertaTokenizer""", """XLMRobertaTokenizerFast""")
def __init__( self : List[Any] , __lowercase : Union[str, Any]=None , __lowercase : int=None , **__lowercase : Any ) -> Tuple:
SCREAMING_SNAKE_CASE__ : str =None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , __lowercase , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =kwargs.pop('''feature_extractor''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(__lowercase , __lowercase )
def __call__( self : Union[str, Any] , __lowercase : Optional[Any]=None , __lowercase : Union[str, Any]=None , __lowercase : List[str]=None , **__lowercase : str ) -> Tuple:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.tokenizer(__lowercase , return_tensors=__lowercase , **__lowercase )
if images is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.image_processor(__lowercase , return_tensors=__lowercase , **__lowercase )
if text is not None and images is not None:
SCREAMING_SNAKE_CASE__ : int =image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**__lowercase ) , tensor_type=__lowercase )
def __magic_name__ ( self : int , *__lowercase : Optional[Any] , **__lowercase : Tuple ) -> Dict:
return self.tokenizer.batch_decode(*__lowercase , **__lowercase )
def __magic_name__ ( self : List[Any] , *__lowercase : Optional[Any] , **__lowercase : Union[str, Any] ) -> Union[str, Any]:
return self.tokenizer.decode(*__lowercase , **__lowercase )
@property
def __magic_name__ ( self : List[Any] ) -> Tuple:
SCREAMING_SNAKE_CASE__ : Optional[int] =self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ : str =self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) ) | 152 | 1 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'BridgeTower/bridgetower-base': 'https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json',
'BridgeTower/bridgetower-base-itm-mlm': (
'https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'
),
}
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : List[str] = '''bridgetower_vision_model'''
def __init__( self , _UpperCamelCase=7_6_8 , _UpperCamelCase=1_2 , _UpperCamelCase=3 , _UpperCamelCase=1_6 , _UpperCamelCase=2_8_8 , _UpperCamelCase=1 , _UpperCamelCase=1E-05 , _UpperCamelCase=False , _UpperCamelCase=True , _UpperCamelCase=False , **_UpperCamelCase , ) -> Optional[int]:
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = hidden_size
UpperCAmelCase_ : int = num_hidden_layers
UpperCAmelCase_ : List[Any] = num_channels
UpperCAmelCase_ : str = patch_size
UpperCAmelCase_ : Tuple = image_size
UpperCAmelCase_ : str = initializer_factor
UpperCAmelCase_ : int = layer_norm_eps
UpperCAmelCase_ : Tuple = stop_gradient
UpperCAmelCase_ : Union[str, Any] = share_layernorm
UpperCAmelCase_ : Any = remove_last_layer
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , **_UpperCamelCase ) -> "PretrainedConfig":
UpperCAmelCase_ : Optional[Any] = cls.get_config_dict(_UpperCamelCase , **_UpperCamelCase )
if config_dict.get('model_type' ) == "bridgetower":
UpperCAmelCase_ : List[Any] = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_UpperCamelCase , **_UpperCamelCase )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : Any = '''bridgetower_text_model'''
def __init__( self , _UpperCamelCase=5_0_2_6_5 , _UpperCamelCase=7_6_8 , _UpperCamelCase=1_2 , _UpperCamelCase=1_2 , _UpperCamelCase=1 , _UpperCamelCase=3_0_7_2 , _UpperCamelCase="gelu" , _UpperCamelCase=0.1 , _UpperCamelCase=0.1 , _UpperCamelCase=5_1_4 , _UpperCamelCase=1 , _UpperCamelCase=1E-05 , _UpperCamelCase=1 , _UpperCamelCase=0 , _UpperCamelCase=2 , _UpperCamelCase="absolute" , _UpperCamelCase=True , **_UpperCamelCase , ) -> Optional[int]:
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = vocab_size
UpperCAmelCase_ : Optional[Any] = hidden_size
UpperCAmelCase_ : Tuple = num_hidden_layers
UpperCAmelCase_ : str = num_attention_heads
UpperCAmelCase_ : str = hidden_act
UpperCAmelCase_ : Optional[int] = initializer_factor
UpperCAmelCase_ : str = intermediate_size
UpperCAmelCase_ : Tuple = hidden_dropout_prob
UpperCAmelCase_ : str = attention_probs_dropout_prob
UpperCAmelCase_ : List[str] = max_position_embeddings
UpperCAmelCase_ : Tuple = type_vocab_size
UpperCAmelCase_ : Optional[int] = layer_norm_eps
UpperCAmelCase_ : Any = position_embedding_type
UpperCAmelCase_ : Tuple = use_cache
UpperCAmelCase_ : Dict = pad_token_id
UpperCAmelCase_ : Optional[Any] = bos_token_id
UpperCAmelCase_ : int = eos_token_id
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , **_UpperCamelCase ) -> "PretrainedConfig":
UpperCAmelCase_ : List[Any] = cls.get_config_dict(_UpperCamelCase , **_UpperCamelCase )
if config_dict.get('model_type' ) == "bridgetower":
UpperCAmelCase_ : Any = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_UpperCamelCase , **_UpperCamelCase )
class lowerCamelCase (_snake_case ):
'''simple docstring'''
_snake_case : str = '''bridgetower'''
def __init__( self , _UpperCamelCase=True , _UpperCamelCase="gelu" , _UpperCamelCase=7_6_8 , _UpperCamelCase=1 , _UpperCamelCase=1E-05 , _UpperCamelCase=False , _UpperCamelCase="add" , _UpperCamelCase=1_2 , _UpperCamelCase=6 , _UpperCamelCase=False , _UpperCamelCase=False , _UpperCamelCase=None , _UpperCamelCase=None , **_UpperCamelCase , ) -> Union[str, Any]:
# TODO: remove this once the Hub files are updated.
UpperCAmelCase_ : Dict = kwargs.pop('text_config_dict' , _UpperCamelCase )
UpperCAmelCase_ : Union[str, Any] = kwargs.pop('vision_config_dict' , _UpperCamelCase )
super().__init__(**_UpperCamelCase )
UpperCAmelCase_ : int = share_cross_modal_transformer_layers
UpperCAmelCase_ : Tuple = hidden_act
UpperCAmelCase_ : Any = hidden_size
UpperCAmelCase_ : Optional[Any] = initializer_factor
UpperCAmelCase_ : Union[str, Any] = layer_norm_eps
UpperCAmelCase_ : Optional[Any] = share_link_tower_layers
UpperCAmelCase_ : Dict = link_tower_type
UpperCAmelCase_ : Union[str, Any] = num_attention_heads
UpperCAmelCase_ : int = num_hidden_layers
UpperCAmelCase_ : int = tie_word_embeddings
UpperCAmelCase_ : int = init_layernorm_from_vision_encoder
if text_config is None:
UpperCAmelCase_ : Optional[int] = {}
logger.info('`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.' )
if vision_config is None:
UpperCAmelCase_ : List[str] = {}
logger.info('`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.' )
UpperCAmelCase_ : Any = BridgeTowerTextConfig(**_UpperCamelCase )
UpperCAmelCase_ : Any = BridgeTowerVisionConfig(**_UpperCamelCase )
@classmethod
def __UpperCAmelCase ( cls , _UpperCamelCase , _UpperCamelCase , **_UpperCamelCase ) -> int:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_UpperCamelCase )
def __UpperCAmelCase ( self ) -> Optional[Any]:
UpperCAmelCase_ : Union[str, Any] = copy.deepcopy(self.__dict__ )
UpperCAmelCase_ : Any = self.text_config.to_dict()
UpperCAmelCase_ : Union[str, Any] = self.vision_config.to_dict()
UpperCAmelCase_ : Any = self.__class__.model_type
return output
| 355 |
from __future__ import annotations
from collections.abc import Callable
__UpperCAmelCase = list[list[float | int]]
def lowercase__ ( __snake_case : Matrix , __snake_case : Matrix ):
'''simple docstring'''
UpperCAmelCase_ : int = len(__snake_case )
UpperCAmelCase_ : Matrix = [[0 for _ in range(size + 1 )] for _ in range(__snake_case )]
UpperCAmelCase_ : int
UpperCAmelCase_ : int
UpperCAmelCase_ : int
UpperCAmelCase_ : int
UpperCAmelCase_ : int
UpperCAmelCase_ : float
for row in range(__snake_case ):
for col in range(__snake_case ):
UpperCAmelCase_ : Dict = matrix[row][col]
UpperCAmelCase_ : Union[str, Any] = vector[row][0]
UpperCAmelCase_ : Union[str, Any] = 0
UpperCAmelCase_ : Union[str, Any] = 0
while row < size and col < size:
# pivoting
UpperCAmelCase_ : Optional[Any] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__snake_case , __snake_case ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
UpperCAmelCase_ , UpperCAmelCase_ : Tuple = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __snake_case ):
UpperCAmelCase_ : Optional[int] = augmented[rowa][col] / augmented[row][col]
UpperCAmelCase_ : List[str] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __snake_case ):
for row in range(__snake_case ):
UpperCAmelCase_ : Union[str, Any] = augmented[row][col] / augmented[col][col]
for cola in range(__snake_case , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__snake_case )
]
def lowercase__ ( __snake_case : list[int] ):
'''simple docstring'''
UpperCAmelCase_ : int = len(__snake_case )
UpperCAmelCase_ : Matrix = [[0 for _ in range(__snake_case )] for _ in range(__snake_case )]
UpperCAmelCase_ : Matrix = [[0] for _ in range(__snake_case )]
UpperCAmelCase_ : Matrix
UpperCAmelCase_ : int
UpperCAmelCase_ : int
UpperCAmelCase_ : int
for x_val, y_val in enumerate(__snake_case ):
for col in range(__snake_case ):
UpperCAmelCase_ : int = (x_val + 1) ** (size - col - 1)
UpperCAmelCase_ : int = y_val
UpperCAmelCase_ : List[str] = solve(__snake_case , __snake_case )
def interpolated_func(__snake_case : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__snake_case ) )
return interpolated_func
def lowercase__ ( __snake_case : int ):
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowercase__ ( __snake_case : Callable[[int], int] = question_function , __snake_case : int = 10 ):
'''simple docstring'''
UpperCAmelCase_ : list[int] = [func(__snake_case ) for x_val in range(1 , order + 1 )]
UpperCAmelCase_ : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
UpperCAmelCase_ : int = 0
UpperCAmelCase_ : Callable[[int], int]
UpperCAmelCase_ : int
for poly in polynomials:
UpperCAmelCase_ : Optional[int] = 1
while func(__snake_case ) == poly(__snake_case ):
x_val += 1
ret += poly(__snake_case )
return ret
if __name__ == "__main__":
print(F'{solution() = }')
| 145 | 0 |
from __future__ import annotations
__UpperCAmelCase = list[list[int]]
# assigning initial values to the grid
__UpperCAmelCase = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
__UpperCAmelCase = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def lowercase__ ( __snake_case : Matrix , __snake_case : int , __snake_case : int , __snake_case : int ):
'''simple docstring'''
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def lowercase__ ( __snake_case : Matrix ):
'''simple docstring'''
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def lowercase__ ( __snake_case : Matrix ):
'''simple docstring'''
if location := find_empty_location(__snake_case ):
UpperCAmelCase_ , UpperCAmelCase_ : Optional[int] = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(__snake_case , __snake_case , __snake_case , __snake_case ):
UpperCAmelCase_ : Any = digit
if sudoku(__snake_case ) is not None:
return grid
UpperCAmelCase_ : Tuple = 0
return None
def lowercase__ ( __snake_case : Matrix ):
'''simple docstring'''
for row in grid:
for cell in row:
print(__snake_case , end=' ' )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('\nExample grid:\n' + '=' * 20)
print_solution(example_grid)
print('\nExample grid solution:')
__UpperCAmelCase = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('Cannot find a solution.')
| 29 |
import os
# Precomputes a list of the 100 first triangular numbers
__UpperCAmelCase = [int(0.5 * n * (n + 1)) for n in range(1, 101)]
def lowercase__ ( ):
'''simple docstring'''
UpperCAmelCase_ : Any = os.path.dirname(os.path.realpath(__snake_case ) )
UpperCAmelCase_ : Optional[Any] = os.path.join(__snake_case , 'words.txt' )
UpperCAmelCase_ : Union[str, Any] = ''
with open(__snake_case ) as f:
UpperCAmelCase_ : List[Any] = f.readline()
UpperCAmelCase_ : Optional[int] = [word.strip('"' ) for word in words.strip('\r\n' ).split(',' )]
UpperCAmelCase_ : Optional[int] = [
word
for word in [sum(ord(__snake_case ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(__snake_case )
if __name__ == "__main__":
print(solution())
| 29 | 1 |
'''simple docstring'''
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
UpperCamelCase__ : Union[str, Any] = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def a__ ( lowerCAmelCase__ ) -> str:
UpperCAmelCase__ : Optional[int] = test_results.split(''' ''' )
UpperCAmelCase__ : Tuple = 0
UpperCAmelCase__ : Tuple = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
UpperCAmelCase__ : List[Any] = expressions[-2] if '''=''' in expressions[-1] else expressions[-1]
for i, expression in enumerate(lowerCAmelCase__ ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def a__ ( lowerCAmelCase__ ) -> Tuple:
UpperCAmelCase__ : int = {}
UpperCAmelCase__ : List[str] = None
UpperCAmelCase__ : Optional[Any] = False
for line in failures_short_lines.split('''\n''' ):
if re.search(R'''_ \[doctest\]''' , lowerCAmelCase__ ):
UpperCAmelCase__ : Tuple = True
UpperCAmelCase__ : Optional[Any] = line.split(''' ''' )[2]
elif in_error and not line.split(''' ''' )[0].isdigit():
UpperCAmelCase__ : List[Any] = line
UpperCAmelCase__ : Union[str, Any] = False
return failures
class lowerCamelCase_ :
def __init__( self : Tuple , _A : str , _A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = title
UpperCAmelCase__ : Optional[int] = doc_test_results['''time_spent'''].split(''',''' )[0]
UpperCAmelCase__ : Optional[int] = doc_test_results['''success''']
UpperCAmelCase__ : Optional[Any] = doc_test_results['''failures''']
UpperCAmelCase__ : Union[str, Any] = self.n_success + self.n_failures
# Failures and success of the modeling tests
UpperCAmelCase__ : Tuple = doc_test_results
@property
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Union[str, Any] = [self._time_spent]
UpperCAmelCase__ : List[str] = 0
for time in time_spent:
UpperCAmelCase__ : Tuple = time.split(''':''' )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(_A ) == 1:
UpperCAmelCase__ : str = [0, 0, time_parts[0]]
UpperCAmelCase__ : Optional[int] = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 3_600 + minutes * 60 + seconds
UpperCAmelCase__ : List[str] = total_secs // 3_600, (total_secs % 3_600) // 60, total_secs % 60
return f"""{int(_A )}h{int(_A )}m{int(_A )}s"""
@property
def lowercase_ ( self : Any ):
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def lowercase_ ( self : Tuple ):
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def lowercase_ ( self : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Dict = 40
UpperCAmelCase__ : Dict = {k: v['''failed'''] for k, v in doc_test_results.items() if isinstance(_A , _A )}
UpperCAmelCase__ : Dict = ''''''
for category, failures in category_failures.items():
if len(_A ) == 0:
continue
if report != "":
report += "\n\n"
report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_A )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def lowercase_ ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase__ : List[str] = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(_A )
@staticmethod
def lowercase_ ( ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = [
{
'''type''': '''section''',
'''text''': {
'''type''': '''plain_text''',
'''text''': '''There was an issue running the tests.''',
},
'''accessory''': {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''Check Action results''', '''emoji''': True},
'''url''': f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
]
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(_A )} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text='''There was an issue running the tests.''' , blocks=_A , )
def lowercase_ ( self : int ):
'''simple docstring'''
print('''Sending the following payload''' )
print(json.dumps({'''blocks''': json.loads(self.payload )} ) )
UpperCAmelCase__ : Optional[Any] = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else '''All tests passed.'''
UpperCAmelCase__ : Any = client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , blocks=self.payload , text=_A , )
def lowercase_ ( self : Union[str, Any] , _A : Union[str, Any] , _A : Tuple , _A : List[Any] , _A : Dict ):
'''simple docstring'''
UpperCAmelCase__ : Optional[Any] = ''''''
for key, value in failures.items():
UpperCAmelCase__ : str = value[:200] + ''' [Truncated]''' if len(_A ) > 250 else value
failures_text += f"""*{key}*\n_{value}_\n\n"""
UpperCAmelCase__ : int = job_name
UpperCAmelCase__ : int = {'''type''': '''section''', '''text''': {'''type''': '''mrkdwn''', '''text''': text}}
if job_link is not None:
UpperCAmelCase__ : List[str] = {
'''type''': '''button''',
'''text''': {'''type''': '''plain_text''', '''text''': '''GitHub Action job''', '''emoji''': True},
'''url''': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def lowercase_ ( self : List[str] ):
'''simple docstring'''
if self.thread_ts is None:
raise ValueError('''Can only post reply if a post has been made.''' )
UpperCAmelCase__ : Optional[int] = self.doc_test_results.pop('''job_link''' )
self.doc_test_results.pop('''failures''' )
self.doc_test_results.pop('''success''' )
self.doc_test_results.pop('''time_spent''' )
UpperCAmelCase__ : Tuple = sorted(self.doc_test_results.items() , key=lambda _A : t[0] )
for job, job_result in sorted_dict:
if len(job_result['''failures'''] ):
UpperCAmelCase__ : Any = f"""*Num failures* :{len(job_result["failed"] )} \n"""
UpperCAmelCase__ : Tuple = job_result['''failures''']
UpperCAmelCase__ : List[Any] = self.get_reply_blocks(_A , _A , _A , text=_A )
print('''Sending the following reply''' )
print(json.dumps({'''blocks''': blocks} ) )
client.chat_postMessage(
channel=os.environ['''CI_SLACK_CHANNEL_ID_DAILY'''] , text=f"""Results for {job}""" , blocks=_A , thread_ts=self.thread_ts['''ts'''] , )
time.sleep(1 )
def a__ ( ) -> int:
UpperCAmelCase__ : Any = os.environ['''GITHUB_RUN_ID''']
UpperCAmelCase__ : List[str] = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
UpperCAmelCase__ : Optional[int] = requests.get(lowerCAmelCase__ ).json()
UpperCAmelCase__ : List[Any] = {}
try:
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
UpperCAmelCase__ : int = math.ceil((result['''total_count'''] - 1_00) / 1_00 )
for i in range(lowerCAmelCase__ ):
UpperCAmelCase__ : Optional[Any] = requests.get(url + F"""&page={i + 2}""" ).json()
jobs.update({job['''name''']: job['''html_url'''] for job in result['''jobs''']} )
return jobs
except Exception as e:
print('''Unknown error, could not fetch links.''' , lowerCAmelCase__ )
return {}
def a__ ( lowerCAmelCase__ ) -> List[Any]:
UpperCAmelCase__ : Union[str, Any] = {}
if os.path.exists(lowerCAmelCase__ ):
UpperCAmelCase__ : List[str] = os.listdir(lowerCAmelCase__ )
for file in files:
try:
with open(os.path.join(lowerCAmelCase__ , lowerCAmelCase__ ) , encoding='''utf-8''' ) as f:
UpperCAmelCase__ : int = f.read()
except UnicodeDecodeError as e:
raise ValueError(F"""Could not open {os.path.join(lowerCAmelCase__ , lowerCAmelCase__ )}.""" ) from e
return _artifact
def a__ ( ) -> Optional[Any]:
class lowerCamelCase_ :
def __init__( self : Optional[int] , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : str = name
UpperCAmelCase__ : List[str] = []
def __str__( self : Tuple ):
'''simple docstring'''
return self.name
def lowercase_ ( self : Union[str, Any] , _A : str ):
'''simple docstring'''
self.paths.append({'''name''': self.name, '''path''': path} )
UpperCAmelCase__ : Dict[str, Artifact] = {}
UpperCAmelCase__ : int = filter(os.path.isdir , os.listdir() )
for directory in directories:
UpperCAmelCase__ : int = directory
if artifact_name not in _available_artifacts:
UpperCAmelCase__ : Dict = Artifact(lowerCAmelCase__ )
_available_artifacts[artifact_name].add_path(lowerCAmelCase__ )
return _available_artifacts
if __name__ == "__main__":
UpperCamelCase__ : int = get_job_links()
UpperCamelCase__ : Union[str, Any] = retrieve_available_artifacts()
UpperCamelCase__ : Union[str, Any] = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
UpperCamelCase__ : Optional[Any] = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
UpperCamelCase__ : Optional[int] = github_actions_job_links.get('''run_doctests''')
UpperCamelCase__ : Tuple = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
UpperCamelCase__ : int = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ : Union[str, Any] = handle_test_results(artifact['''stats'''])
UpperCamelCase__ : Dict = failed
UpperCamelCase__ : Dict = success
UpperCamelCase__ : Dict = time_spent[1:-1] + ''', '''
UpperCamelCase__ : Optional[Any] = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
UpperCamelCase__ : Optional[int] = line.replace('''FAILED ''', '''''')
UpperCamelCase__ : int = line.split()[0].replace('''\n''', '''''')
if "::" in line:
UpperCamelCase__ , UpperCamelCase__ : int = line.split('''::''')
else:
UpperCamelCase__ , UpperCamelCase__ : Optional[int] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
UpperCamelCase__ : Optional[int] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
UpperCamelCase__ : Tuple = all_failures[test] if test in all_failures else '''N/A'''
UpperCamelCase__ : List[Any] = failure
break
UpperCamelCase__ : Dict = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 353 |
'''simple docstring'''
def a__ ( lowerCAmelCase__ ) -> Optional[Any]:
UpperCAmelCase__ : Optional[Any] = len(lowerCAmelCase__ )
for i in range(length - 1 ):
UpperCAmelCase__ : Optional[Any] = i
for k in range(i + 1 , lowerCAmelCase__ ):
if collection[k] < collection[least]:
UpperCAmelCase__ : Dict = k
if least != i:
UpperCAmelCase__ , UpperCAmelCase__ : Optional[Any] = (collection[i], collection[least])
return collection
if __name__ == "__main__":
UpperCamelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
UpperCamelCase__ = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 299 | 0 |
'''simple docstring'''
import re
def _A ( A__ ):
"""simple docstring"""
__lowercase = re.compile(
R'''^(?:0|94|\+94|0{2}94)''' R'''7(0|1|2|4|5|6|7|8)''' R'''(-| |)''' R'''\d{7}$''' )
return bool(re.search(A__ , A__ ) )
if __name__ == "__main__":
lowerCAmelCase__ = '''0094702343221'''
print(is_sri_lankan_phone_number(phone))
| 104 |
'''simple docstring'''
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
UpperCAmelCase_ = '\\n@misc{chen2021evaluating,\n title={Evaluating Large Language Models Trained on Code},\n author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \\nand Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \\nand Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \\nand Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \\nand Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \\nand Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \\nand Mohammad Bavarian and Clemens Winter and Philippe Tillet \\nand Felipe Petroski Such and Dave Cummings and Matthias Plappert \\nand Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \\nand William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \\nand Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \\nand William Saunders and Christopher Hesse and Andrew N. Carr \\nand Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \\nand Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \\nand Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \\nand Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},\n year={2021},\n eprint={2107.03374},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n'
UpperCAmelCase_ = '\\nThis metric implements the evaluation harness for the HumanEval problem solving dataset\ndescribed in the paper "Evaluating Large Language Models Trained on Code"\n(https://arxiv.org/abs/2107.03374).\n'
UpperCAmelCase_ = '\nCalculates how good are predictions given some references, using certain scores\nArgs:\n predictions: list of candidates to evaluate. Each candidates should be a list\n of strings with several code candidates to solve the problem.\n references: a list with a test for each prediction. Each test should evaluate the\n correctness of a code candidate.\n k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])\n num_workers: number of workers used to evaluate the canidate programs (Default: 4).\n timeout:\nReturns:\n pass_at_k: dict with pass rates for each k\n results: dict with granular results of each unittest\nExamples:\n >>> code_eval = datasets.load_metric("code_eval")\n >>> test_cases = ["assert add(2,3)==5"]\n >>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]\n >>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])\n >>> print(pass_at_k)\n {\'pass@1\': 0.5, \'pass@2\': 1.0}\n'
UpperCAmelCase_ = '\n################################################################################\n !!!WARNING!!!\n################################################################################\nThe "code_eval" metric executes untrusted model-generated code in Python.\nAlthough it is highly unlikely that model-generated code will do something\novertly malicious in response to this test suite, model-generated code may act\ndestructively due to a lack of model capability or alignment.\nUsers are strongly encouraged to sandbox this evaluation suite so that it\ndoes not perform destructive actions on their host or network. For more\ninformation on how OpenAI sandboxes its code, see the paper "Evaluating Large\nLanguage Models Trained on Code" (https://arxiv.org/abs/2107.03374).\n\nOnce you have read this disclaimer and taken appropriate precautions,\nset the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this\nwith:\n\n>>> import os\n>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"\n\n################################################################################\\n'
UpperCAmelCase_ = 'The MIT License\n\nCopyright (c) OpenAI (https://openai.com)\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the "Software"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_ ( datasets.Metric ):
'''simple docstring'''
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
"""simple docstring"""
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : List[str]=[1, 10, 1_00] , _UpperCAmelCase : Optional[Any]=4 , _UpperCAmelCase : Any=3.0 ):
"""simple docstring"""
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=_UpperCAmelCase ) as executor:
UpperCAmelCase__ = []
UpperCAmelCase__ = Counter()
UpperCAmelCase__ = 0
UpperCAmelCase__ = defaultdict(_UpperCAmelCase )
for task_id, (candidates, test_case) in enumerate(zip(_UpperCAmelCase , _UpperCAmelCase ) ):
for candidate in candidates:
UpperCAmelCase__ = candidate + """\n""" + test_case
UpperCAmelCase__ = (test_program, timeout, task_id, completion_id[task_id])
UpperCAmelCase__ = executor.submit(_UpperCAmelCase , *_UpperCAmelCase )
futures.append(_UpperCAmelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(_UpperCAmelCase ):
UpperCAmelCase__ = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
UpperCAmelCase__ , UpperCAmelCase__ = [], []
for result in results.values():
result.sort()
UpperCAmelCase__ = [r[1]["""passed"""] for r in result]
total.append(len(_UpperCAmelCase ) )
correct.append(sum(_UpperCAmelCase ) )
UpperCAmelCase__ = np.array(_UpperCAmelCase )
UpperCAmelCase__ = np.array(_UpperCAmelCase )
UpperCAmelCase__ = k
UpperCAmelCase__ = {f'''pass@{k}''': estimate_pass_at_k(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[Any] ):
'''simple docstring'''
def estimator(SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
UpperCAmelCase__ = itertools.repeat(SCREAMING_SNAKE_CASE__ , len(SCREAMING_SNAKE_CASE__ ) )
else:
assert len(SCREAMING_SNAKE_CASE__ ) == len(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = iter(SCREAMING_SNAKE_CASE__ )
return np.array([estimator(int(SCREAMING_SNAKE_CASE__ ) , int(SCREAMING_SNAKE_CASE__ ) , SCREAMING_SNAKE_CASE__ ) for n, c in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )] )
| 346 | 0 |
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
UpperCamelCase__ : Union[str, Any] = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, """utils"""))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
UpperCamelCase__ : List[str] = """ \"\"\"\n Output class for the scheduler\'s step function output.\n\n Args:\n prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the\n denoising loop.\n pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):\n The predicted denoised sample (x_{0}) based on the model output from the current timestep.\n `pred_original_sample` can be used to preview progress or for guidance.\n \"\"\"\n\n prev_sample: torch.FloatTensor\n pred_original_sample: Optional[torch.FloatTensor] = None\n"""
class lowerCamelCase_ ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE_ ( self : Optional[int] ):
'''simple docstring'''
a = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir ,'''schedulers/''' ) )
a = self.diffusers_dir
shutil.copy(
os.path.join(__A ,'''src/diffusers/schedulers/scheduling_ddpm.py''' ) ,os.path.join(self.diffusers_dir ,'''schedulers/scheduling_ddpm.py''' ) ,)
def SCREAMING_SNAKE_CASE_ ( self : str ):
'''simple docstring'''
a = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] ,__lowerCamelCase : str ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : Union[str, Any] ,__lowerCamelCase : int=None ):
'''simple docstring'''
a = comment + F"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
a = comment + F"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
a = black.Mode(target_versions={black.TargetVersion.PYaa} ,line_length=1_19 )
a = black.format_str(__A ,mode=__A )
a = os.path.join(self.diffusers_dir ,'''new_code.py''' )
with open(__A ,'''w''' ,newline='''\n''' ) as f:
f.write(__A )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__A ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name ,overwrite=__A )
with open(__A ,'''r''' ) as f:
self.assertTrue(f.read() ,__A )
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
a = check_copies.find_code_in_diffusers('''schedulers.scheduling_ddpm.DDPMSchedulerOutput''' )
self.assertEqual(__A ,__A )
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] ):
'''simple docstring'''
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ,'''DDPMSchedulerOutput''' ,REFERENCE_CODE + '''\n''' ,)
# With no empty line at the end
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput''' ,'''DDPMSchedulerOutput''' ,__A ,)
# Copy consistency with rename
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' ,'''TestSchedulerOutput''' ,re.sub('''DDPM''' ,'''Test''' ,__A ) ,)
# Copy consistency with a really long name
a = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
F"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" ,F"""{long_class_name}SchedulerOutput""" ,re.sub('''Bert''' ,__A ,__A ) ,)
# Copy consistency with overwrite
self.check_copy_consistency(
'''# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test''' ,'''TestSchedulerOutput''' ,__A ,overwrite_result=re.sub('''DDPM''' ,'''Test''' ,__A ) ,)
| 351 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class lowerCamelCase_ ( unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self : Tuple ):
'''simple docstring'''
a = FlaxXLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
a = AutoTokenizer.from_pretrained('''xlm-roberta-base''' )
a = '''The dog is cute and lives in the garden house'''
a = jnp.array([tokenizer.encode(__lowerCamelCase )] )
a = (1, 12, 7_68) # batch_size, sequence_length, embedding_vector_dim
a = jnp.array(
[[-0.0_101, 0.1_218, -0.0_803, 0.0_801, 0.1_327, 0.0_776, -0.1_215, 0.2_383, 0.3_338, 0.3_106, 0.0_300, 0.0_252]] )
a = model(__lowerCamelCase )['''last_hidden_state''']
self.assertEqual(output.shape ,__lowerCamelCase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] ,__lowerCamelCase ,atol=1e-3 ) )
| 330 | 0 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[int] , _lowerCamelCase : Dict) -> Any:
'''simple docstring'''
__UpperCamelCase : Optional[int] = XCLIPTextConfig()
# derive patch size from model name
__UpperCamelCase : Union[str, Any] = model_name.find("patch")
__UpperCamelCase : Optional[Any] = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
__UpperCamelCase : Optional[int] = XCLIPVisionConfig(patch_size=__UpperCAmelCase , num_frames=__UpperCAmelCase)
if "large" in model_name:
__UpperCamelCase : List[Any] = 768
__UpperCamelCase : Optional[Any] = 3_072
__UpperCamelCase : Optional[Any] = 12
__UpperCamelCase : List[Any] = 1_024
__UpperCamelCase : List[str] = 4_096
__UpperCamelCase : List[Any] = 16
__UpperCamelCase : str = 24
__UpperCamelCase : Tuple = 768
__UpperCamelCase : Tuple = 3_072
if model_name == "xclip-large-patch14-16-frames":
__UpperCamelCase : Dict = 336
__UpperCamelCase : Union[str, Any] = XCLIPConfig.from_text_vision_configs(__UpperCAmelCase , __UpperCAmelCase)
if "large" in model_name:
__UpperCamelCase : Any = 768
return config
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Union[str, Any]) -> Optional[int]:
'''simple docstring'''
if name == "token_embedding.weight":
__UpperCamelCase : List[Any] = name.replace("token_embedding.weight" , "text_model.embeddings.token_embedding.weight")
if name == "positional_embedding":
__UpperCamelCase : Union[str, Any] = name.replace("positional_embedding" , "text_model.embeddings.position_embedding.weight")
if "ln_1" in name:
__UpperCamelCase : str = name.replace("ln_1" , "layer_norm1")
if "ln_2" in name:
__UpperCamelCase : List[Any] = name.replace("ln_2" , "layer_norm2")
if "c_fc" in name:
__UpperCamelCase : List[Any] = name.replace("c_fc" , "fc1")
if "c_proj" in name:
__UpperCamelCase : List[Any] = name.replace("c_proj" , "fc2")
if name.startswith("transformer.resblocks"):
__UpperCamelCase : Any = name.replace("transformer.resblocks" , "text_model.encoder.layers")
if "attn.out_proj" in name and "message" not in name:
__UpperCamelCase : Any = name.replace("attn.out_proj" , "self_attn.out_proj")
if "ln_final" in name:
__UpperCamelCase : List[str] = name.replace("ln_final" , "text_model.final_layer_norm")
# visual encoder
if name == "visual.class_embedding":
__UpperCamelCase : Optional[int] = name.replace("visual.class_embedding" , "vision_model.embeddings.class_embedding")
if name == "visual.positional_embedding":
__UpperCamelCase : str = name.replace("visual.positional_embedding" , "vision_model.embeddings.position_embedding.weight")
if name.startswith("visual.transformer.resblocks"):
__UpperCamelCase : Tuple = name.replace("visual.transformer.resblocks" , "vision_model.encoder.layers")
if "visual.conv1" in name:
__UpperCamelCase : Any = name.replace("visual.conv1" , "vision_model.embeddings.patch_embedding")
if "visual.ln_pre" in name:
__UpperCamelCase : Tuple = name.replace("visual.ln_pre" , "vision_model.pre_layernorm")
if "visual.ln_post" in name:
__UpperCamelCase : Optional[Any] = name.replace("visual.ln_post" , "vision_model.post_layernorm")
if "visual.proj" in name:
__UpperCamelCase : Any = name.replace("visual.proj" , "visual_projection.weight")
if "text_projection" in name:
__UpperCamelCase : Optional[Any] = name.replace("text_projection" , "text_projection.weight")
# things on top
if "prompts_visual_proj" in name:
__UpperCamelCase : List[Any] = name.replace("prompts_visual_proj" , "prompts_visual_projection")
if "prompts_visual_ln" in name:
__UpperCamelCase : Union[str, Any] = name.replace("prompts_visual_ln" , "prompts_visual_layernorm")
# mit
if name == "mit.positional_embedding":
__UpperCamelCase : Optional[Any] = name.replace("positional" , "position")
if name.startswith("mit.resblocks"):
__UpperCamelCase : List[str] = name.replace("mit.resblocks" , "mit.encoder.layers")
# prompts generator
if name.startswith("prompts_generator.norm"):
__UpperCamelCase : List[str] = name.replace("prompts_generator.norm" , "prompts_generator.layernorm")
return name
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Dict , _lowerCamelCase : List[str]) -> Dict:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
__UpperCamelCase : str = orig_state_dict.pop(__UpperCAmelCase)
if "attn.in_proj" in key:
__UpperCamelCase : List[str] = key.split(".")
if key.startswith("visual"):
__UpperCamelCase : Union[str, Any] = key_split[3]
__UpperCamelCase : Optional[Any] = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
__UpperCamelCase : List[str] = val[
:dim, :
]
__UpperCamelCase : Optional[int] = val[
dim : dim * 2, :
]
__UpperCamelCase : Optional[Any] = val[
-dim:, :
]
else:
__UpperCamelCase : Optional[int] = val[
:dim
]
__UpperCamelCase : Optional[Any] = val[
dim : dim * 2
]
__UpperCamelCase : Optional[Any] = val[
-dim:
]
else:
if "weight" in key:
__UpperCamelCase : int = val[
:dim, :
]
__UpperCamelCase : Union[str, Any] = val[
dim : dim * 2, :
]
__UpperCamelCase : List[Any] = val[
-dim:, :
]
else:
__UpperCamelCase : str = val[:dim]
__UpperCamelCase : Any = val[
dim : dim * 2
]
__UpperCamelCase : Dict = val[-dim:]
elif key.startswith("mit"):
__UpperCamelCase : Tuple = key_split[2]
__UpperCamelCase : Tuple = config.vision_config.mit_hidden_size
if "weight" in key:
__UpperCamelCase : str = val[:dim, :]
__UpperCamelCase : List[Any] = val[dim : dim * 2, :]
__UpperCamelCase : List[str] = val[-dim:, :]
else:
__UpperCamelCase : Any = val[:dim]
__UpperCamelCase : Optional[Any] = val[dim : dim * 2]
__UpperCamelCase : Optional[int] = val[-dim:]
else:
__UpperCamelCase : Tuple = key_split[2]
__UpperCamelCase : str = config.text_config.hidden_size
if "weight" in key:
__UpperCamelCase : int = val[:dim, :]
__UpperCamelCase : Any = val[
dim : dim * 2, :
]
__UpperCamelCase : int = val[-dim:, :]
else:
__UpperCamelCase : str = val[:dim]
__UpperCamelCase : Optional[int] = val[
dim : dim * 2
]
__UpperCamelCase : Optional[Any] = val[-dim:]
else:
__UpperCamelCase : Dict = rename_key(__UpperCAmelCase)
if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
__UpperCamelCase : Union[str, Any] = val.T
__UpperCamelCase : Dict = val
return orig_state_dict
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any]) -> Tuple:
'''simple docstring'''
if num_frames == 8:
__UpperCamelCase : Optional[Any] = '''eating_spaghetti_8_frames.npy'''
elif num_frames == 16:
__UpperCamelCase : str = '''eating_spaghetti.npy'''
elif num_frames == 32:
__UpperCamelCase : str = '''eating_spaghetti_32_frames.npy'''
__UpperCamelCase : List[Any] = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename=__UpperCAmelCase , repo_type="dataset" , )
__UpperCamelCase : str = np.load(__UpperCAmelCase)
return list(__UpperCAmelCase)
def _SCREAMING_SNAKE_CASE ( _lowerCamelCase : Optional[Any] , _lowerCamelCase : Any=None , _lowerCamelCase : str=False) -> List[Any]:
'''simple docstring'''
__UpperCamelCase : Optional[Any] = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
__UpperCamelCase : Optional[Any] = model_to_url[model_name]
__UpperCamelCase : Dict = 8
if "16-frames" in model_name:
__UpperCamelCase : Optional[Any] = 16
elif "shot" in model_name:
__UpperCamelCase : Any = 32
__UpperCamelCase : Any = get_xclip_config(__UpperCAmelCase , __UpperCAmelCase)
__UpperCamelCase : Any = XCLIPModel(__UpperCAmelCase)
model.eval()
if "drive" in checkpoint_url:
__UpperCamelCase : int = '''pytorch_model.bin'''
gdown.cached_download(__UpperCAmelCase , __UpperCAmelCase , quiet=__UpperCAmelCase)
__UpperCamelCase : Optional[int] = torch.load(__UpperCAmelCase , map_location="cpu")['''model''']
else:
__UpperCamelCase : Optional[int] = torch.hub.load_state_dict_from_url(__UpperCAmelCase)['''model''']
__UpperCamelCase : Any = convert_state_dict(__UpperCAmelCase , __UpperCAmelCase)
__UpperCamelCase : Any = XCLIPModel(__UpperCAmelCase)
__UpperCamelCase : Dict = model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase)
assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
model.eval()
__UpperCamelCase : Union[str, Any] = 336 if model_name == '''xclip-large-patch14-16-frames''' else 224
__UpperCamelCase : str = VideoMAEImageProcessor(size=__UpperCAmelCase)
__UpperCamelCase : List[str] = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
__UpperCamelCase : List[str] = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
__UpperCamelCase : Union[str, Any] = XCLIPProcessor(image_processor=__UpperCAmelCase , tokenizer=__UpperCAmelCase)
__UpperCamelCase : str = prepare_video(__UpperCAmelCase)
__UpperCamelCase : List[str] = processor(
text=["playing sports", "eating spaghetti", "go shopping"] , videos=__UpperCAmelCase , return_tensors="pt" , padding=__UpperCAmelCase)
print("Shape of pixel values:" , inputs.pixel_values.shape)
with torch.no_grad():
__UpperCamelCase : Any = model(**__UpperCAmelCase)
# Verify outputs
__UpperCamelCase : Any = outputs.logits_per_video
__UpperCamelCase : str = logits_per_video.softmax(dim=1)
print("Probs:" , __UpperCAmelCase)
# kinetics-400
if model_name == "xclip-base-patch32":
__UpperCamelCase : Tuple = torch.tensor([[0.0_0_1_9, 0.9_9_5_1, 0.0_0_3_0]])
elif model_name == "xclip-base-patch32-16-frames":
__UpperCamelCase : int = torch.tensor([[7.0999e-04, 9.9883e-01, 4.5580e-04]])
elif model_name == "xclip-base-patch16":
__UpperCamelCase : List[str] = torch.tensor([[0.0_0_8_3, 0.9_6_8_1, 0.0_2_3_6]])
elif model_name == "xclip-base-patch16-16-frames":
__UpperCamelCase : Optional[Any] = torch.tensor([[7.6937e-04, 9.9728e-01, 1.9473e-03]])
elif model_name == "xclip-large-patch14":
__UpperCamelCase : Tuple = torch.tensor([[0.0_0_6_2, 0.9_8_6_4, 0.0_0_7_5]])
elif model_name == "xclip-large-patch14-16-frames":
__UpperCamelCase : Union[str, Any] = torch.tensor([[3.3877e-04, 9.9937e-01, 2.8888e-04]])
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
__UpperCamelCase : List[str] = torch.tensor([[0.0_5_5_5, 0.8_9_1_4, 0.0_5_3_1]])
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
__UpperCamelCase : Dict = torch.tensor([[3.8554e-04, 9.9929e-01, 3.2754e-04]])
elif model_name == "xclip-large-patch14-kinetics-600":
__UpperCamelCase : Dict = torch.tensor([[0.0_0_3_6, 0.9_9_2_0, 0.0_0_4_5]])
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
__UpperCamelCase : Optional[int] = torch.tensor([[7.1890e-06, 9.9994e-01, 5.6559e-05]])
elif model_name == "xclip-base-patch16-hmdb-4-shot":
__UpperCamelCase : List[str] = torch.tensor([[1.0320e-05, 9.9993e-01, 6.2435e-05]])
elif model_name == "xclip-base-patch16-hmdb-8-shot":
__UpperCamelCase : Dict = torch.tensor([[4.1377e-06, 9.9990e-01, 9.8386e-05]])
elif model_name == "xclip-base-patch16-hmdb-16-shot":
__UpperCamelCase : List[Any] = torch.tensor([[4.1347e-05, 9.9962e-01, 3.3411e-04]])
elif model_name == "xclip-base-patch16-ucf-2-shot":
__UpperCamelCase : Any = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
elif model_name == "xclip-base-patch16-ucf-4-shot":
__UpperCamelCase : List[Any] = torch.tensor([[8.5857e-05, 9.9928e-01, 6.3291e-04]])
elif model_name == "xclip-base-patch16-ucf-8-shot":
__UpperCamelCase : int = torch.tensor([[0.0_0_2_7, 0.9_9_0_4, 0.0_0_7_0]])
elif model_name == "xclip-base-patch16-ucf-16-shot":
__UpperCamelCase : Optional[int] = torch.tensor([[9.8219e-04, 9.9593e-01, 3.0863e-03]])
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
__UpperCamelCase : int = torch.tensor([[3.5082e-04, 9.9785e-01, 1.7966e-03]])
else:
raise ValueError(F'Model name {model_name} not supported')
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase , atol=1e-3)
print("Looks ok!")
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}')
model.save_pretrained(__UpperCAmelCase)
if push_to_hub:
print("Pushing model, processor and slow tokenizer files to the hub...")
model.push_to_hub(__UpperCAmelCase , organization="nielsr")
processor.push_to_hub(__UpperCAmelCase , organization="nielsr")
slow_tokenizer.push_to_hub(__UpperCAmelCase , organization="nielsr")
if __name__ == "__main__":
lowercase : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='xclip-base-patch32',
type=str,
help='Name of the model.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowercase : Optional[Any] = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 232 | """simple docstring"""
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__A = logging.get_logger(__name__)
__A = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class UpperCAmelCase (_UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase :Dict = "deta"
_UpperCAmelCase :Any = {
"hidden_size": "d_model",
"num_attention_heads": "encoder_attention_heads",
}
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=900 , _UpperCAmelCase=2048 , _UpperCAmelCase=6 , _UpperCAmelCase=2048 , _UpperCAmelCase=8 , _UpperCAmelCase=6 , _UpperCAmelCase=1024 , _UpperCAmelCase=8 , _UpperCAmelCase=0.0 , _UpperCAmelCase=True , _UpperCAmelCase="relu" , _UpperCAmelCase=256 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.0 , _UpperCAmelCase=0.02 , _UpperCAmelCase=1.0 , _UpperCAmelCase=True , _UpperCAmelCase=False , _UpperCAmelCase="sine" , _UpperCAmelCase=5 , _UpperCAmelCase=4 , _UpperCAmelCase=4 , _UpperCAmelCase=True , _UpperCAmelCase=300 , _UpperCAmelCase=True , _UpperCAmelCase=True , _UpperCAmelCase=1 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=1 , _UpperCAmelCase=1 , _UpperCAmelCase=5 , _UpperCAmelCase=2 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.25 , **_UpperCAmelCase , ):
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
lowercase__: str = CONFIG_MAPPING['''resnet'''](out_features=['''stage2''', '''stage3''', '''stage4'''] )
else:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
lowercase__: Tuple = backbone_config.pop('''model_type''' )
lowercase__: str = CONFIG_MAPPING[backbone_model_type]
lowercase__: Optional[int] = config_class.from_dict(_UpperCAmelCase )
lowercase__: int = backbone_config
lowercase__: Any = num_queries
lowercase__: List[str] = max_position_embeddings
lowercase__: Optional[Any] = d_model
lowercase__: List[Any] = encoder_ffn_dim
lowercase__: Tuple = encoder_layers
lowercase__: Dict = encoder_attention_heads
lowercase__: Any = decoder_ffn_dim
lowercase__: Union[str, Any] = decoder_layers
lowercase__: List[Any] = decoder_attention_heads
lowercase__: int = dropout
lowercase__: List[str] = attention_dropout
lowercase__: Tuple = activation_dropout
lowercase__: Tuple = activation_function
lowercase__: int = init_std
lowercase__: Optional[Any] = init_xavier_std
lowercase__: Optional[Any] = encoder_layerdrop
lowercase__: Optional[int] = auxiliary_loss
lowercase__: Union[str, Any] = position_embedding_type
# deformable attributes
lowercase__: List[str] = num_feature_levels
lowercase__: Optional[Any] = encoder_n_points
lowercase__: int = decoder_n_points
lowercase__: str = two_stage
lowercase__: Optional[int] = two_stage_num_proposals
lowercase__: Tuple = with_box_refine
lowercase__: str = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
lowercase__: Union[str, Any] = class_cost
lowercase__: Optional[int] = bbox_cost
lowercase__: int = giou_cost
# Loss coefficients
lowercase__: Optional[int] = mask_loss_coefficient
lowercase__: List[str] = dice_loss_coefficient
lowercase__: str = bbox_loss_coefficient
lowercase__: Union[str, Any] = giou_loss_coefficient
lowercase__: Optional[int] = eos_coefficient
lowercase__: str = focal_alpha
super().__init__(is_encoder_decoder=_UpperCAmelCase , **_UpperCAmelCase )
@property
def _snake_case ( self ):
return self.encoder_attention_heads
@property
def _snake_case ( self ):
return self.d_model
def _snake_case ( self ):
lowercase__: Union[str, Any] = copy.deepcopy(self.__dict__ )
lowercase__: Dict = self.backbone_config.to_dict()
lowercase__: Union[str, Any] = self.__class__.model_type
return output
| 177 | 0 |
"""simple docstring"""
import argparse
import json
import os
from collections import OrderedDict
import torch
from transformers import LukeConfig, LukeForMaskedLM, MLukeTokenizer, XLMRobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( snake_case : str , snake_case : List[str] , snake_case : Dict , snake_case : List[Any] , snake_case : str )-> Any:
# Load configuration defined in the metadata file
with open(snake_case ) as metadata_file:
_lowerCamelCase = json.load(snake_case )
_lowerCamelCase = LukeConfig(use_entity_aware_attention=snake_case , **metadata['model_config'] )
# Load in the weights from the checkpoint_path
_lowerCamelCase = torch.load(snake_case , map_location='cpu' )['module']
# Load the entity vocab file
_lowerCamelCase = load_original_entity_vocab(snake_case )
# add an entry for [MASK2]
_lowerCamelCase = max(entity_vocab.values() ) + 1
config.entity_vocab_size += 1
_lowerCamelCase = XLMRobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )
# Add special tokens to the token vocabulary for downstream tasks
_lowerCamelCase = AddedToken('<ent>' , lstrip=snake_case , rstrip=snake_case )
_lowerCamelCase = AddedToken('<ent2>' , lstrip=snake_case , rstrip=snake_case )
tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_a, entity_token_a]} )
config.vocab_size += 2
print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
tokenizer.save_pretrained(snake_case )
with open(os.path.join(snake_case , 'tokenizer_config.json' ) , 'r' ) as f:
_lowerCamelCase = json.load(snake_case )
_lowerCamelCase = 'MLukeTokenizer'
with open(os.path.join(snake_case , 'tokenizer_config.json' ) , 'w' ) as f:
json.dump(snake_case , snake_case )
with open(os.path.join(snake_case , MLukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
json.dump(snake_case , snake_case )
_lowerCamelCase = MLukeTokenizer.from_pretrained(snake_case )
# Initialize the embeddings of the special tokens
_lowerCamelCase = tokenizer.convert_tokens_to_ids(['@'] )[0]
_lowerCamelCase = tokenizer.convert_tokens_to_ids(['#'] )[0]
_lowerCamelCase = state_dict['embeddings.word_embeddings.weight']
_lowerCamelCase = word_emb[ent_init_index].unsqueeze(0 )
_lowerCamelCase = word_emb[enta_init_index].unsqueeze(0 )
_lowerCamelCase = torch.cat([word_emb, ent_emb, enta_emb] )
# add special tokens for 'entity_predictions.bias'
for bias_name in ["lm_head.decoder.bias", "lm_head.bias"]:
_lowerCamelCase = state_dict[bias_name]
_lowerCamelCase = decoder_bias[ent_init_index].unsqueeze(0 )
_lowerCamelCase = decoder_bias[enta_init_index].unsqueeze(0 )
_lowerCamelCase = torch.cat([decoder_bias, ent_decoder_bias, enta_decoder_bias] )
# Initialize the query layers of the entity-aware self-attention mechanism
for layer_index in range(config.num_hidden_layers ):
for matrix_name in ["query.weight", "query.bias"]:
_lowerCamelCase = f'encoder.layer.{layer_index}.attention.self.'
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
_lowerCamelCase = state_dict[prefix + matrix_name]
# Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
_lowerCamelCase = state_dict['entity_embeddings.entity_embeddings.weight']
_lowerCamelCase = entity_emb[entity_vocab['[MASK]']].unsqueeze(0 )
_lowerCamelCase = torch.cat([entity_emb, entity_mask_emb] )
# add [MASK2] for 'entity_predictions.bias'
_lowerCamelCase = state_dict['entity_predictions.bias']
_lowerCamelCase = entity_prediction_bias[entity_vocab['[MASK]']].unsqueeze(0 )
_lowerCamelCase = torch.cat([entity_prediction_bias, entity_mask_bias] )
_lowerCamelCase = LukeForMaskedLM(config=snake_case ).eval()
state_dict.pop('entity_predictions.decoder.weight' )
state_dict.pop('lm_head.decoder.weight' )
state_dict.pop('lm_head.decoder.bias' )
_lowerCamelCase = OrderedDict()
for key, value in state_dict.items():
if not (key.startswith('lm_head' ) or key.startswith('entity_predictions' )):
_lowerCamelCase = state_dict[key]
else:
_lowerCamelCase = state_dict[key]
_lowerCamelCase , _lowerCamelCase = model.load_state_dict(snake_case , strict=snake_case )
if set(snake_case ) != {"luke.embeddings.position_ids"}:
raise ValueError(f'Unexpected unexpected_keys: {unexpected_keys}' )
if set(snake_case ) != {
"lm_head.decoder.weight",
"lm_head.decoder.bias",
"entity_predictions.decoder.weight",
}:
raise ValueError(f'Unexpected missing_keys: {missing_keys}' )
model.tie_weights()
assert (model.luke.embeddings.word_embeddings.weight == model.lm_head.decoder.weight).all()
assert (model.luke.entity_embeddings.entity_embeddings.weight == model.entity_predictions.decoder.weight).all()
# Check outputs
_lowerCamelCase = MLukeTokenizer.from_pretrained(snake_case , task='entity_classification' )
_lowerCamelCase = 'ISO 639-3 uses the code fas for the dialects spoken across Iran and アフガニスタン (Afghanistan).'
_lowerCamelCase = (0, 9)
_lowerCamelCase = tokenizer(snake_case , entity_spans=[span] , return_tensors='pt' )
_lowerCamelCase = model(**snake_case )
# Verify word hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_lowerCamelCase = torch.Size((1, 33, 768) )
_lowerCamelCase = torch.tensor([[0.0_8_9_2, 0.0_5_9_6, -0.2_8_1_9], [0.0_1_3_4, 0.1_1_9_9, 0.0_5_7_3], [-0.0_1_6_9, 0.0_9_2_7, 0.0_6_4_4]] )
if not (outputs.last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , snake_case , atol=1e-4 ):
raise ValueError
# Verify entity hidden states
if model_size == "large":
raise NotImplementedError
else: # base
_lowerCamelCase = torch.Size((1, 1, 768) )
_lowerCamelCase = torch.tensor([[-0.1_4_8_2, 0.0_6_0_9, 0.0_3_2_2]] )
if not (outputs.entity_last_hidden_state.shape == expected_shape):
raise ValueError(
f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
f' {expected_shape}' )
if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , snake_case , atol=1e-4 ):
raise ValueError
# Verify masked word/entity prediction
_lowerCamelCase = MLukeTokenizer.from_pretrained(snake_case )
_lowerCamelCase = 'Tokyo is the capital of <mask>.'
_lowerCamelCase = (24, 30)
_lowerCamelCase = tokenizer(snake_case , entity_spans=[span] , return_tensors='pt' )
_lowerCamelCase = model(**snake_case )
_lowerCamelCase = encoding['input_ids'][0].tolist()
_lowerCamelCase = input_ids.index(tokenizer.convert_tokens_to_ids('<mask>' ) )
_lowerCamelCase = outputs.logits[0][mask_position_id].argmax(dim=-1 )
assert "Japan" == tokenizer.decode(snake_case )
_lowerCamelCase = outputs.entity_logits[0][0].argmax().item()
_lowerCamelCase = [
entity for entity, entity_id in tokenizer.entity_vocab.items() if entity_id == predicted_entity_id
]
assert [e for e in multilingual_predicted_entities if e.startswith('en:' )][0] == "en:Japan"
# Finally, save our PyTorch model and tokenizer
print('Saving PyTorch model to {}'.format(snake_case ) )
model.save_pretrained(snake_case )
def SCREAMING_SNAKE_CASE_ ( snake_case : Optional[Any] )-> Tuple:
_lowerCamelCase = ['[MASK]', '[PAD]', '[UNK]']
_lowerCamelCase = [json.loads(snake_case ) for line in open(snake_case )]
_lowerCamelCase = {}
for entry in data:
_lowerCamelCase = entry['id']
for entity_name, language in entry["entities"]:
if entity_name in SPECIAL_TOKENS:
_lowerCamelCase = entity_id
break
_lowerCamelCase = f'{language}:{entity_name}'
_lowerCamelCase = entity_id
return new_mapping
if __name__ == "__main__":
A_ : List[Any] =argparse.ArgumentParser()
# Required parameters
parser.add_argument("""--checkpoint_path""", type=str, help="""Path to a pytorch_model.bin file.""")
parser.add_argument(
"""--metadata_path""", default=None, type=str, help="""Path to a metadata.json file, defining the configuration."""
)
parser.add_argument(
"""--entity_vocab_path""",
default=None,
type=str,
help="""Path to an entity_vocab.tsv file, containing the entity vocabulary.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to where to dump the output PyTorch model."""
)
parser.add_argument(
"""--model_size""", default="""base""", type=str, choices=["""base""", """large"""], help="""Size of the model to be converted."""
)
A_ : List[str] =parser.parse_args()
convert_luke_checkpoint(
args.checkpoint_path,
args.metadata_path,
args.entity_vocab_path,
args.pytorch_dump_folder_path,
args.model_size,
)
| 80 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A_ : Any ={
"""configuration_table_transformer""": [
"""TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""TableTransformerConfig""",
"""TableTransformerOnnxConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A_ : List[str] =[
"""TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TableTransformerForObjectDetection""",
"""TableTransformerModel""",
"""TableTransformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
A_ : Optional[int] =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 80 | 1 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a_ = logging.get_logger(__name__)
class _UpperCamelCase ( __A ):
'''simple docstring'''
lowerCamelCase__ =['input_values', 'padding_mask']
def __init__( self : str , a : int = 1 , a : int = 2_4000 , a : float = 0.0 , a : float = None , a : float = None , **a : Optional[Any] , ) -> List[Any]:
"""simple docstring"""
super().__init__(feature_size=a , sampling_rate=a , padding_value=a , **a )
SCREAMING_SNAKE_CASE : str = chunk_length_s
SCREAMING_SNAKE_CASE : List[str] = overlap
@property
def __UpperCamelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __UpperCamelCase ( self : Union[str, Any] ) -> Optional[int]:
"""simple docstring"""
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self : Optional[int] , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : Optional[Union[bool, str, PaddingStrategy]] = None , a : Optional[bool] = False , a : Optional[int] = None , a : Optional[Union[str, TensorType]] = None , a : Optional[int] = None , ) -> BatchFeature:
"""simple docstring"""
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
F" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"
F" {self.sampling_rate} and not {sampling_rate}." )
else:
logger.warning(
"It is strongly recommended to pass the `sampling_rate` argument to this function. "
"Failing to do so can result in silent errors that might be hard to debug." )
if padding and truncation:
raise ValueError("Both padding and truncation were set. Make sure you only set one." )
elif padding is None:
# by default let's pad the inputs
SCREAMING_SNAKE_CASE : str = True
SCREAMING_SNAKE_CASE : List[Any] = bool(
isinstance(a , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(a , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(a , np.ndarray ):
SCREAMING_SNAKE_CASE : Any = np.asarray(a , dtype=np.floataa )
elif isinstance(a , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
SCREAMING_SNAKE_CASE : Union[str, Any] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
SCREAMING_SNAKE_CASE : Optional[int] = [np.asarray(a ).T]
# verify inputs are valid
for idx, example in enumerate(a ):
if example.ndim > 2:
raise ValueError(F"Expected input shape (channels, length) but got shape {example.shape}" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F"Expected mono audio but example has {example.shape[-1]} channels" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F"Expected stereo audio but example has {example.shape[-1]} channels" )
SCREAMING_SNAKE_CASE : Union[str, Any] = None
SCREAMING_SNAKE_CASE : Tuple = BatchFeature({"input_values": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
SCREAMING_SNAKE_CASE : Optional[Any] = min(array.shape[0] for array in raw_audio )
SCREAMING_SNAKE_CASE : Optional[int] = int(np.floor(max_length / self.chunk_stride ) )
SCREAMING_SNAKE_CASE : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
SCREAMING_SNAKE_CASE : List[Any] = max(array.shape[0] for array in raw_audio )
SCREAMING_SNAKE_CASE : int = int(np.ceil(max_length / self.chunk_stride ) )
SCREAMING_SNAKE_CASE : List[Any] = (nb_step - 1) * self.chunk_stride + self.chunk_length
SCREAMING_SNAKE_CASE : str = "max_length"
else:
SCREAMING_SNAKE_CASE : List[Any] = input_values
# normal padding on batch
if padded_inputs is None:
SCREAMING_SNAKE_CASE : Optional[Any] = self.pad(
a , max_length=a , truncation=a , padding=a , return_attention_mask=a , )
if padding:
SCREAMING_SNAKE_CASE : Optional[Any] = padded_inputs.pop("attention_mask" )
SCREAMING_SNAKE_CASE : List[str] = []
for example in padded_inputs.pop("input_values" ):
if self.feature_size == 1:
SCREAMING_SNAKE_CASE : Tuple = example[..., None]
input_values.append(example.T )
SCREAMING_SNAKE_CASE : Dict = input_values
if return_tensors is not None:
SCREAMING_SNAKE_CASE : Optional[int] = padded_inputs.convert_to_tensors(a )
return padded_inputs | 76 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __lowercase ( ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
SCREAMING_SNAKE_CASE = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(_SCREAMING_SNAKE_CASE )
# Let's go
SCREAMING_SNAKE_CASE = parser.parse_args()
if not hasattr(_SCREAMING_SNAKE_CASE , """func""" ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE = args.func(_SCREAMING_SNAKE_CASE )
service.run()
if __name__ == "__main__":
main()
| 296 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class __a ( lowerCAmelCase__ ):
@staticmethod
@abstractmethod
def snake_case_ ( a__ ):
raise NotImplementedError()
@abstractmethod
def snake_case_ ( self ):
raise NotImplementedError()
| 80 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def SCREAMING_SNAKE_CASE_ ( )-> List[Any]:
_lowerCamelCase = ArgumentParser('Accelerate CLI tool' , usage='accelerate <command> [<args>]' , allow_abbrev=snake_case )
_lowerCamelCase = parser.add_subparsers(help='accelerate command helpers' )
# Register commands
get_config_parser(subparsers=snake_case )
env_command_parser(subparsers=snake_case )
launch_command_parser(subparsers=snake_case )
tpu_command_parser(subparsers=snake_case )
test_command_parser(subparsers=snake_case )
# Let's go
_lowerCamelCase = parser.parse_args()
if not hasattr(snake_case , 'func' ):
parser.print_help()
exit(1 )
# Run
args.func(snake_case )
if __name__ == "__main__":
main()
| 80 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class lowercase__ :
'''simple docstring'''
def __init__( self, __magic_name__ ) -> Tuple:
"""simple docstring"""
UpperCamelCase__ : Any = data
UpperCamelCase__ : Node | None = None
class lowercase__ :
'''simple docstring'''
def __init__( self ) -> Dict:
"""simple docstring"""
UpperCamelCase__ : int = None
UpperCamelCase__ : Tuple = None
def __iter__( self ) -> Iterator[Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[int] = self.head
while self.head:
yield node.data
UpperCamelCase__ : List[Any] = node.next
if node == self.head:
break
def __len__( self ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ) -> Any:
"""simple docstring"""
return "->".join(str(__magic_name__ ) for item in iter(self ) )
def UpperCamelCase__ ( self, __magic_name__ ) -> None:
"""simple docstring"""
self.insert_nth(len(self ), __magic_name__ )
def UpperCamelCase__ ( self, __magic_name__ ) -> None:
"""simple docstring"""
self.insert_nth(0, __magic_name__ )
def UpperCamelCase__ ( self, __magic_name__, __magic_name__ ) -> None:
"""simple docstring"""
if index < 0 or index > len(self ):
raise IndexError('''list index out of range.''' )
UpperCamelCase__ : Union[str, Any] = Node(__magic_name__ )
if self.head is None:
UpperCamelCase__ : Any = new_node # first node points itself
UpperCamelCase__ : str = new_node
elif index == 0: # insert at head
UpperCamelCase__ : Optional[int] = self.head
UpperCamelCase__ : Optional[Any] = new_node
else:
UpperCamelCase__ : List[Any] = self.head
for _ in range(index - 1 ):
UpperCamelCase__ : List[Any] = temp.next
UpperCamelCase__ : Any = temp.next
UpperCamelCase__ : Dict = new_node
if index == len(self ) - 1: # insert at tail
UpperCamelCase__ : List[Any] = new_node
def UpperCamelCase__ ( self ) -> List[Any]:
"""simple docstring"""
return self.delete_nth(0 )
def UpperCamelCase__ ( self ) -> Any:
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase__ ( self, __magic_name__ = 0 ) -> Any:
"""simple docstring"""
if not 0 <= index < len(self ):
raise IndexError('''list index out of range.''' )
UpperCamelCase__ : Tuple = self.head
if self.head == self.tail: # just one node
UpperCamelCase__ : Dict = None
elif index == 0: # delete head node
UpperCamelCase__ : Any = self.tail.next.next
UpperCamelCase__ : Dict = self.head.next
else:
UpperCamelCase__ : str = self.head
for _ in range(index - 1 ):
UpperCamelCase__ : Optional[Any] = temp.next
UpperCamelCase__ : Tuple = temp.next
UpperCamelCase__ : Dict = temp.next.next
if index == len(self ) - 1: # delete at tail
UpperCamelCase__ : Optional[int] = temp
return delete_node.data
def UpperCamelCase__ ( self ) -> bool:
"""simple docstring"""
return len(self ) == 0
def lowerCAmelCase_ ( ) -> None:
UpperCamelCase__ : str = CircularLinkedList()
assert len(__UpperCAmelCase ) == 0
assert circular_linked_list.is_empty() is True
assert str(__UpperCAmelCase ) == ""
try:
circular_linked_list.delete_front()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_tail()
raise AssertionError # This should not happen
except IndexError:
assert True # This should happen
try:
circular_linked_list.delete_nth(-1 )
raise AssertionError
except IndexError:
assert True
try:
circular_linked_list.delete_nth(0 )
raise AssertionError
except IndexError:
assert True
assert circular_linked_list.is_empty() is True
for i in range(5 ):
assert len(__UpperCAmelCase ) == i
circular_linked_list.insert_nth(__UpperCAmelCase , i + 1 )
assert str(__UpperCAmelCase ) == "->".join(str(__UpperCAmelCase ) for i in range(1 , 6 ) )
circular_linked_list.insert_tail(6 )
assert str(__UpperCAmelCase ) == "->".join(str(__UpperCAmelCase ) for i in range(1 , 7 ) )
circular_linked_list.insert_head(0 )
assert str(__UpperCAmelCase ) == "->".join(str(__UpperCAmelCase ) for i in range(0 , 7 ) )
assert circular_linked_list.delete_front() == 0
assert circular_linked_list.delete_tail() == 6
assert str(__UpperCAmelCase ) == "->".join(str(__UpperCAmelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.delete_nth(2 ) == 3
circular_linked_list.insert_nth(2 , 3 )
assert str(__UpperCAmelCase ) == "->".join(str(__UpperCAmelCase ) for i in range(1 , 6 ) )
assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 201 |
def lowerCAmelCase_ ( __UpperCAmelCase: float , __UpperCAmelCase: int ) -> float:
if digit_amount > 0:
return round(number - int(__UpperCAmelCase ) , __UpperCAmelCase )
return number - int(__UpperCAmelCase )
if __name__ == "__main__":
print(decimal_isolate(1.53, 0))
print(decimal_isolate(35.345, 1))
print(decimal_isolate(35.345, 2))
print(decimal_isolate(35.345, 3))
print(decimal_isolate(-14.789, 3))
print(decimal_isolate(0, 2))
print(decimal_isolate(-14.123, 1))
print(decimal_isolate(-14.123, 2))
print(decimal_isolate(-14.123, 3))
| 201 | 1 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def SCREAMING_SNAKE_CASE__ ( snake_case_ = True, *snake_case_, **snake_case_ ) -> Any:
"""simple docstring"""
if not is_tqdm_available():
raise ImportError('''Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.''' )
a = False
if main_process_only:
a = PartialState().local_process_index == 0
return _tqdm(*snake_case_, **snake_case_, disable=snake_case_ )
| 330 |
def SCREAMING_SNAKE_CASE__ ( snake_case_ ) -> Union[str, Any]:
"""simple docstring"""
stooge(snake_case_, 0, len(snake_case_ ) - 1 )
return arr
def SCREAMING_SNAKE_CASE__ ( snake_case_, snake_case_, snake_case_ ) -> Optional[Any]:
"""simple docstring"""
if i >= h:
return
# If first element is smaller than the last then swap them
if arr[i] > arr[h]:
a , a = arr[h], arr[i]
# If there are more than 2 elements in the array
if h - i + 1 > 2:
a = (int)((h - i + 1) / 3 )
# Recursively sort first 2/3 elements
stooge(snake_case_, snake_case_, (h - t) )
# Recursively sort last 2/3 elements
stooge(snake_case_, i + t, (snake_case_) )
# Recursively sort first 2/3 elements
stooge(snake_case_, snake_case_, (h - t) )
if __name__ == "__main__":
UpperCamelCase__ : Dict = input("""Enter numbers separated by a comma:\n""").strip()
UpperCamelCase__ : Optional[int] = [int(item) for item in user_input.split(""",""")]
print(stooge_sort(unsorted))
| 330 | 1 |
'''simple docstring'''
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class lowerCAmelCase_:
'''simple docstring'''
def __init__( self ,__UpperCAmelCase ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase=None ,__UpperCAmelCase="resnet50" ,__UpperCAmelCase=3 ,__UpperCAmelCase=32 ,__UpperCAmelCase=3 ,__UpperCAmelCase=True ,__UpperCAmelCase=True ,) -> Optional[int]:
lowerCAmelCase__ : List[Any] = parent
lowerCAmelCase__ : Tuple = out_indices if out_indices is not None else [4]
lowerCAmelCase__ : Dict = stage_names
lowerCAmelCase__ : Optional[Any] = out_features
lowerCAmelCase__ : Optional[Any] = backbone
lowerCAmelCase__ : Union[str, Any] = batch_size
lowerCAmelCase__ : Any = image_size
lowerCAmelCase__ : Any = num_channels
lowerCAmelCase__ : int = use_pretrained_backbone
lowerCAmelCase__ : Any = is_training
def UpperCAmelCase_ ( self ) -> List[Any]:
lowerCAmelCase__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Optional[Any] = self.get_config()
return config, pixel_values
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
return TimmBackboneConfig(
image_size=self.image_size ,num_channels=self.num_channels ,out_features=self.out_features ,out_indices=self.out_indices ,stage_names=self.stage_names ,use_pretrained_backbone=self.use_pretrained_backbone ,backbone=self.backbone ,)
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> List[Any]:
lowerCAmelCase__ : Optional[int] = TimmBackbone(config=_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : List[str] = model(_lowerCamelCase )
self.parent.assertEqual(
result.feature_map[-1].shape ,(self.batch_size, model.channels[-1], 14, 14) ,)
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
lowerCAmelCase__ : Optional[int] = self.prepare_config_and_inputs()
lowerCAmelCase__ : Tuple = config_and_inputs
lowerCAmelCase__ : List[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
@require_timm
class lowerCAmelCase_( a__ , a__ , a__ , unittest.TestCase ):
'''simple docstring'''
__lowercase : str = (TimmBackbone,) if is_torch_available() else ()
__lowercase : str = {'feature-extraction': TimmBackbone} if is_torch_available() else {}
__lowercase : Any = False
__lowercase : Tuple = False
__lowercase : Any = False
__lowercase : Optional[Any] = False
def UpperCAmelCase_ ( self ) -> Tuple:
lowerCAmelCase__ : Tuple = TimmBackboneModelTester(self )
lowerCAmelCase__ : Optional[int] = ConfigTester(self ,config_class=_lowerCamelCase ,has_text_modality=_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Tuple:
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : Any = '''resnet18'''
lowerCAmelCase__ : Dict = '''microsoft/resnet-18'''
lowerCAmelCase__ : int = AutoBackbone.from_pretrained(_lowerCamelCase ,use_timm_backbone=_lowerCamelCase )
lowerCAmelCase__ : str = AutoBackbone.from_pretrained(_lowerCamelCase )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) ,len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices ,(-1,) )
self.assertEqual(transformers_model.out_indices ,[len(timm_model.stage_names ) - 1] )
lowerCAmelCase__ : Optional[Any] = AutoBackbone.from_pretrained(_lowerCamelCase ,use_timm_backbone=_lowerCamelCase ,out_indices=[1, 2, 3] )
lowerCAmelCase__ : Dict = AutoBackbone.from_pretrained(_lowerCamelCase ,out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices ,transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) ,len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels ,transformers_model.channels )
@unittest.skip("""TimmBackbone doesn\'t support feed forward chunking""" )
def UpperCAmelCase_ ( self ) -> Any:
pass
@unittest.skip("""TimmBackbone doesn\'t have num_hidden_layers attribute""" )
def UpperCAmelCase_ ( self ) -> Any:
pass
@unittest.skip("""TimmBackbone initialization is managed on the timm side""" )
def UpperCAmelCase_ ( self ) -> int:
pass
@unittest.skip("""TimmBackbone models doesn\'t have inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
@unittest.skip("""TimmBackbone models doesn\'t have inputs_embeds""" )
def UpperCAmelCase_ ( self ) -> List[Any]:
pass
@unittest.skip("""TimmBackbone model cannot be created without specifying a backbone checkpoint""" )
def UpperCAmelCase_ ( self ) -> Optional[Any]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def UpperCAmelCase_ ( self ) -> List[Any]:
pass
@unittest.skip("""model weights aren\'t tied in TimmBackbone.""" )
def UpperCAmelCase_ ( self ) -> Tuple:
pass
@unittest.skip("""model weights aren\'t tied in TimmBackbone.""" )
def UpperCAmelCase_ ( self ) -> Union[str, Any]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def UpperCAmelCase_ ( self ) -> List[Any]:
pass
@unittest.skip("""Only checkpoints on timm can be loaded into TimmBackbone""" )
def UpperCAmelCase_ ( self ) -> Tuple:
pass
@unittest.skip("""TimmBackbone doesn\'t have hidden size info in its configuration.""" )
def UpperCAmelCase_ ( self ) -> str:
pass
@unittest.skip("""TimmBackbone doesn\'t support output_attentions.""" )
def UpperCAmelCase_ ( self ) -> int:
pass
@unittest.skip("""Safetensors is not supported by timm.""" )
def UpperCAmelCase_ ( self ) -> int:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def UpperCAmelCase_ ( self ) -> Tuple:
pass
def UpperCAmelCase_ ( self ) -> List[Any]:
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Union[str, Any] = model_class(_lowerCamelCase )
lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : str = [*signature.parameters.keys()]
lowerCAmelCase__ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,_lowerCamelCase )
def UpperCAmelCase_ ( self ) -> Dict:
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : Dict = self.has_attentions
# no need to test all models as different heads yield the same functionality
lowerCAmelCase__ : Optional[int] = self.all_model_classes[0]
lowerCAmelCase__ : Optional[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
lowerCAmelCase__ : List[str] = self._prepare_for_class(_lowerCamelCase ,_lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = model(**_lowerCamelCase )
lowerCAmelCase__ : List[str] = outputs[0][-1]
# Encoder-/Decoder-only models
lowerCAmelCase__ : Tuple = outputs.hidden_states[0]
hidden_states.retain_grad()
if self.has_attentions:
lowerCAmelCase__ : List[str] = outputs.attentions[0]
attentions.retain_grad()
output.flatten()[0].backward(retain_graph=_lowerCamelCase )
self.assertIsNotNone(hidden_states.grad )
if self.has_attentions:
self.assertIsNotNone(attentions.grad )
def UpperCAmelCase_ ( self ) -> List[Any]:
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Dict = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase__ : Optional[int] = model(**_lowerCamelCase )
self.assertEqual(len(result.feature_maps ) ,len(config.out_indices ) )
self.assertEqual(len(model.channels ) ,len(config.out_indices ) )
# Check output of last stage is taken if out_features=None, out_indices=None
lowerCAmelCase__ : Optional[Any] = copy.deepcopy(_lowerCamelCase )
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : int = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(**_lowerCamelCase )
self.assertEqual(len(result.feature_maps ) ,1 )
self.assertEqual(len(model.channels ) ,1 )
# Check backbone can be initialized with fresh weights
lowerCAmelCase__ : Dict = copy.deepcopy(_lowerCamelCase )
lowerCAmelCase__ : int = False
lowerCAmelCase__ : List[Any] = model_class(_lowerCamelCase )
model.to(_lowerCamelCase )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(**_lowerCamelCase )
| 37 |
from __future__ import annotations
import math
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if len(a__ ) != 2 or len(a[0] ) != 2 or len(a__ ) != 2 or len(b[0] ) != 2:
raise Exception('''Matrices are not 2x2''' )
SCREAMING_SNAKE_CASE : Dict = [
[a[0][0] * b[0][0] + a[0][1] * b[1][0], a[0][0] * b[0][1] + a[0][1] * b[1][1]],
[a[1][0] * b[0][0] + a[1][1] * b[1][0], a[1][0] * b[0][1] + a[1][1] * b[1][1]],
]
return new_matrix
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return [
[matrix_a[row][col] + matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(a__ ) )
]
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
return [
[matrix_a[row][col] - matrix_b[row][col] for col in range(len(matrix_a[row] ) )]
for row in range(len(a__ ) )
]
def UpperCAmelCase_( a__ ):
"""simple docstring"""
if len(a__ ) % 2 != 0 or len(a[0] ) % 2 != 0:
raise Exception('''Odd matrices are not supported!''' )
SCREAMING_SNAKE_CASE : str = len(a__ )
SCREAMING_SNAKE_CASE : Any = matrix_length // 2
SCREAMING_SNAKE_CASE : Tuple = [[a[i][j] for j in range(a__ , a__ )] for i in range(a__ )]
SCREAMING_SNAKE_CASE : Optional[int] = [
[a[i][j] for j in range(a__ , a__ )] for i in range(a__ , a__ )
]
SCREAMING_SNAKE_CASE : Optional[Any] = [[a[i][j] for j in range(a__ )] for i in range(a__ )]
SCREAMING_SNAKE_CASE : List[Any] = [[a[i][j] for j in range(a__ )] for i in range(a__ , a__ )]
return top_left, top_right, bot_left, bot_right
def UpperCAmelCase_( a__ ):
"""simple docstring"""
return len(a__ ), len(matrix[0] )
def UpperCAmelCase_( a__ ):
"""simple docstring"""
print('''\n'''.join(str(a__ ) for line in matrix ) )
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if matrix_dimensions(a__ ) == (2, 2):
return default_matrix_multiplication(a__ , a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = split_matrix(a__ )
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE : Optional[Any] = split_matrix(a__ )
SCREAMING_SNAKE_CASE : Dict = actual_strassen(a__ , matrix_subtraction(a__ , a__ ) )
SCREAMING_SNAKE_CASE : List[Any] = actual_strassen(matrix_addition(a__ , a__ ) , a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = actual_strassen(matrix_addition(a__ , a__ ) , a__ )
SCREAMING_SNAKE_CASE : int = actual_strassen(a__ , matrix_subtraction(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Any = actual_strassen(matrix_addition(a__ , a__ ) , matrix_addition(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Tuple = actual_strassen(matrix_subtraction(a__ , a__ ) , matrix_addition(a__ , a__ ) )
SCREAMING_SNAKE_CASE : Tuple = actual_strassen(matrix_subtraction(a__ , a__ ) , matrix_addition(a__ , a__ ) )
SCREAMING_SNAKE_CASE : List[Any] = matrix_addition(matrix_subtraction(matrix_addition(a__ , a__ ) , a__ ) , a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = matrix_addition(a__ , a__ )
SCREAMING_SNAKE_CASE : Dict = matrix_addition(a__ , a__ )
SCREAMING_SNAKE_CASE : Optional[Any] = matrix_subtraction(matrix_subtraction(matrix_addition(a__ , a__ ) , a__ ) , a__ )
# construct the new matrix from our 4 quadrants
SCREAMING_SNAKE_CASE : Optional[Any] = []
for i in range(len(a__ ) ):
new_matrix.append(top_left[i] + top_right[i] )
for i in range(len(a__ ) ):
new_matrix.append(bot_left[i] + bot_right[i] )
return new_matrix
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
if matrix_dimensions(a__ )[1] != matrix_dimensions(a__ )[0]:
SCREAMING_SNAKE_CASE : Any = (
'''Unable to multiply these matrices, please check the dimensions.\n'''
F"""Matrix A: {matrixa}\n"""
F"""Matrix B: {matrixa}"""
)
raise Exception(a__ )
SCREAMING_SNAKE_CASE : str = matrix_dimensions(a__ )
SCREAMING_SNAKE_CASE : Tuple = matrix_dimensions(a__ )
if dimensiona[0] == dimensiona[1] and dimensiona[0] == dimensiona[1]:
return [matrixa, matrixa]
SCREAMING_SNAKE_CASE : str = max(*a__ , *a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = int(math.pow(2 , math.ceil(math.loga(a__ ) ) ) )
SCREAMING_SNAKE_CASE : Optional[int] = matrixa
SCREAMING_SNAKE_CASE : Tuple = matrixa
# Adding zeros to the matrices so that the arrays dimensions are the same and also
# power of 2
for i in range(0 , a__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a__ ):
new_matrixa[i].append(0 )
else:
new_matrixa.append([0] * maxim )
SCREAMING_SNAKE_CASE : Optional[Any] = actual_strassen(a__ , a__ )
# Removing the additional zeros
for i in range(0 , a__ ):
if i < dimensiona[0]:
for _ in range(dimensiona[1] , a__ ):
final_matrix[i].pop()
else:
final_matrix.pop()
return final_matrix
if __name__ == "__main__":
a__ : Dict = [
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 4, 3, 1],
[2, 3, 6, 7],
[3, 1, 2, 4],
[2, 3, 4, 5],
[6, 2, 3, 1],
]
a__ : Union[str, Any] = [[0, 2, 1, 1], [16, 2, 3, 3], [2, 2, 7, 7], [13, 11, 22, 4]]
print(strassen(matrixa, matrixa))
| 313 | 0 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.auto import AutoModelForSeqaSeqLM, AutoTokenizer
from .base import PipelineTool
_SCREAMING_SNAKE_CASE = {
'''Acehnese Arabic''': '''ace_Arab''',
'''Acehnese Latin''': '''ace_Latn''',
'''Mesopotamian Arabic''': '''acm_Arab''',
'''Ta\'izzi-Adeni Arabic''': '''acq_Arab''',
'''Tunisian Arabic''': '''aeb_Arab''',
'''Afrikaans''': '''afr_Latn''',
'''South Levantine Arabic''': '''ajp_Arab''',
'''Akan''': '''aka_Latn''',
'''Amharic''': '''amh_Ethi''',
'''North Levantine Arabic''': '''apc_Arab''',
'''Modern Standard Arabic''': '''arb_Arab''',
'''Modern Standard Arabic Romanized''': '''arb_Latn''',
'''Najdi Arabic''': '''ars_Arab''',
'''Moroccan Arabic''': '''ary_Arab''',
'''Egyptian Arabic''': '''arz_Arab''',
'''Assamese''': '''asm_Beng''',
'''Asturian''': '''ast_Latn''',
'''Awadhi''': '''awa_Deva''',
'''Central Aymara''': '''ayr_Latn''',
'''South Azerbaijani''': '''azb_Arab''',
'''North Azerbaijani''': '''azj_Latn''',
'''Bashkir''': '''bak_Cyrl''',
'''Bambara''': '''bam_Latn''',
'''Balinese''': '''ban_Latn''',
'''Belarusian''': '''bel_Cyrl''',
'''Bemba''': '''bem_Latn''',
'''Bengali''': '''ben_Beng''',
'''Bhojpuri''': '''bho_Deva''',
'''Banjar Arabic''': '''bjn_Arab''',
'''Banjar Latin''': '''bjn_Latn''',
'''Standard Tibetan''': '''bod_Tibt''',
'''Bosnian''': '''bos_Latn''',
'''Buginese''': '''bug_Latn''',
'''Bulgarian''': '''bul_Cyrl''',
'''Catalan''': '''cat_Latn''',
'''Cebuano''': '''ceb_Latn''',
'''Czech''': '''ces_Latn''',
'''Chokwe''': '''cjk_Latn''',
'''Central Kurdish''': '''ckb_Arab''',
'''Crimean Tatar''': '''crh_Latn''',
'''Welsh''': '''cym_Latn''',
'''Danish''': '''dan_Latn''',
'''German''': '''deu_Latn''',
'''Southwestern Dinka''': '''dik_Latn''',
'''Dyula''': '''dyu_Latn''',
'''Dzongkha''': '''dzo_Tibt''',
'''Greek''': '''ell_Grek''',
'''English''': '''eng_Latn''',
'''Esperanto''': '''epo_Latn''',
'''Estonian''': '''est_Latn''',
'''Basque''': '''eus_Latn''',
'''Ewe''': '''ewe_Latn''',
'''Faroese''': '''fao_Latn''',
'''Fijian''': '''fij_Latn''',
'''Finnish''': '''fin_Latn''',
'''Fon''': '''fon_Latn''',
'''French''': '''fra_Latn''',
'''Friulian''': '''fur_Latn''',
'''Nigerian Fulfulde''': '''fuv_Latn''',
'''Scottish Gaelic''': '''gla_Latn''',
'''Irish''': '''gle_Latn''',
'''Galician''': '''glg_Latn''',
'''Guarani''': '''grn_Latn''',
'''Gujarati''': '''guj_Gujr''',
'''Haitian Creole''': '''hat_Latn''',
'''Hausa''': '''hau_Latn''',
'''Hebrew''': '''heb_Hebr''',
'''Hindi''': '''hin_Deva''',
'''Chhattisgarhi''': '''hne_Deva''',
'''Croatian''': '''hrv_Latn''',
'''Hungarian''': '''hun_Latn''',
'''Armenian''': '''hye_Armn''',
'''Igbo''': '''ibo_Latn''',
'''Ilocano''': '''ilo_Latn''',
'''Indonesian''': '''ind_Latn''',
'''Icelandic''': '''isl_Latn''',
'''Italian''': '''ita_Latn''',
'''Javanese''': '''jav_Latn''',
'''Japanese''': '''jpn_Jpan''',
'''Kabyle''': '''kab_Latn''',
'''Jingpho''': '''kac_Latn''',
'''Kamba''': '''kam_Latn''',
'''Kannada''': '''kan_Knda''',
'''Kashmiri Arabic''': '''kas_Arab''',
'''Kashmiri Devanagari''': '''kas_Deva''',
'''Georgian''': '''kat_Geor''',
'''Central Kanuri Arabic''': '''knc_Arab''',
'''Central Kanuri Latin''': '''knc_Latn''',
'''Kazakh''': '''kaz_Cyrl''',
'''Kabiyè''': '''kbp_Latn''',
'''Kabuverdianu''': '''kea_Latn''',
'''Khmer''': '''khm_Khmr''',
'''Kikuyu''': '''kik_Latn''',
'''Kinyarwanda''': '''kin_Latn''',
'''Kyrgyz''': '''kir_Cyrl''',
'''Kimbundu''': '''kmb_Latn''',
'''Northern Kurdish''': '''kmr_Latn''',
'''Kikongo''': '''kon_Latn''',
'''Korean''': '''kor_Hang''',
'''Lao''': '''lao_Laoo''',
'''Ligurian''': '''lij_Latn''',
'''Limburgish''': '''lim_Latn''',
'''Lingala''': '''lin_Latn''',
'''Lithuanian''': '''lit_Latn''',
'''Lombard''': '''lmo_Latn''',
'''Latgalian''': '''ltg_Latn''',
'''Luxembourgish''': '''ltz_Latn''',
'''Luba-Kasai''': '''lua_Latn''',
'''Ganda''': '''lug_Latn''',
'''Luo''': '''luo_Latn''',
'''Mizo''': '''lus_Latn''',
'''Standard Latvian''': '''lvs_Latn''',
'''Magahi''': '''mag_Deva''',
'''Maithili''': '''mai_Deva''',
'''Malayalam''': '''mal_Mlym''',
'''Marathi''': '''mar_Deva''',
'''Minangkabau Arabic ''': '''min_Arab''',
'''Minangkabau Latin''': '''min_Latn''',
'''Macedonian''': '''mkd_Cyrl''',
'''Plateau Malagasy''': '''plt_Latn''',
'''Maltese''': '''mlt_Latn''',
'''Meitei Bengali''': '''mni_Beng''',
'''Halh Mongolian''': '''khk_Cyrl''',
'''Mossi''': '''mos_Latn''',
'''Maori''': '''mri_Latn''',
'''Burmese''': '''mya_Mymr''',
'''Dutch''': '''nld_Latn''',
'''Norwegian Nynorsk''': '''nno_Latn''',
'''Norwegian Bokmål''': '''nob_Latn''',
'''Nepali''': '''npi_Deva''',
'''Northern Sotho''': '''nso_Latn''',
'''Nuer''': '''nus_Latn''',
'''Nyanja''': '''nya_Latn''',
'''Occitan''': '''oci_Latn''',
'''West Central Oromo''': '''gaz_Latn''',
'''Odia''': '''ory_Orya''',
'''Pangasinan''': '''pag_Latn''',
'''Eastern Panjabi''': '''pan_Guru''',
'''Papiamento''': '''pap_Latn''',
'''Western Persian''': '''pes_Arab''',
'''Polish''': '''pol_Latn''',
'''Portuguese''': '''por_Latn''',
'''Dari''': '''prs_Arab''',
'''Southern Pashto''': '''pbt_Arab''',
'''Ayacucho Quechua''': '''quy_Latn''',
'''Romanian''': '''ron_Latn''',
'''Rundi''': '''run_Latn''',
'''Russian''': '''rus_Cyrl''',
'''Sango''': '''sag_Latn''',
'''Sanskrit''': '''san_Deva''',
'''Santali''': '''sat_Olck''',
'''Sicilian''': '''scn_Latn''',
'''Shan''': '''shn_Mymr''',
'''Sinhala''': '''sin_Sinh''',
'''Slovak''': '''slk_Latn''',
'''Slovenian''': '''slv_Latn''',
'''Samoan''': '''smo_Latn''',
'''Shona''': '''sna_Latn''',
'''Sindhi''': '''snd_Arab''',
'''Somali''': '''som_Latn''',
'''Southern Sotho''': '''sot_Latn''',
'''Spanish''': '''spa_Latn''',
'''Tosk Albanian''': '''als_Latn''',
'''Sardinian''': '''srd_Latn''',
'''Serbian''': '''srp_Cyrl''',
'''Swati''': '''ssw_Latn''',
'''Sundanese''': '''sun_Latn''',
'''Swedish''': '''swe_Latn''',
'''Swahili''': '''swh_Latn''',
'''Silesian''': '''szl_Latn''',
'''Tamil''': '''tam_Taml''',
'''Tatar''': '''tat_Cyrl''',
'''Telugu''': '''tel_Telu''',
'''Tajik''': '''tgk_Cyrl''',
'''Tagalog''': '''tgl_Latn''',
'''Thai''': '''tha_Thai''',
'''Tigrinya''': '''tir_Ethi''',
'''Tamasheq Latin''': '''taq_Latn''',
'''Tamasheq Tifinagh''': '''taq_Tfng''',
'''Tok Pisin''': '''tpi_Latn''',
'''Tswana''': '''tsn_Latn''',
'''Tsonga''': '''tso_Latn''',
'''Turkmen''': '''tuk_Latn''',
'''Tumbuka''': '''tum_Latn''',
'''Turkish''': '''tur_Latn''',
'''Twi''': '''twi_Latn''',
'''Central Atlas Tamazight''': '''tzm_Tfng''',
'''Uyghur''': '''uig_Arab''',
'''Ukrainian''': '''ukr_Cyrl''',
'''Umbundu''': '''umb_Latn''',
'''Urdu''': '''urd_Arab''',
'''Northern Uzbek''': '''uzn_Latn''',
'''Venetian''': '''vec_Latn''',
'''Vietnamese''': '''vie_Latn''',
'''Waray''': '''war_Latn''',
'''Wolof''': '''wol_Latn''',
'''Xhosa''': '''xho_Latn''',
'''Eastern Yiddish''': '''ydd_Hebr''',
'''Yoruba''': '''yor_Latn''',
'''Yue Chinese''': '''yue_Hant''',
'''Chinese Simplified''': '''zho_Hans''',
'''Chinese Traditional''': '''zho_Hant''',
'''Standard Malay''': '''zsm_Latn''',
'''Zulu''': '''zul_Latn''',
}
class __lowercase ( lowerCAmelCase__ ):
'''simple docstring'''
a : Any = "facebook/nllb-200-distilled-600M"
a : Optional[int] = (
"This is a tool that translates text from a language to another. It takes three inputs: `text`, which should "
"be the text to translate, `src_lang`, which should be the language of the text to translate and `tgt_lang`, "
"which should be the language for the desired ouput language. Both `src_lang` and `tgt_lang` are written in "
"plain English, such as 'Romanian', or 'Albanian'. It returns the text translated in `tgt_lang`."
)
a : Union[str, Any] = "translator"
a : int = AutoTokenizer
a : Optional[Any] = AutoModelForSeqaSeqLM
a : Dict = LANGUAGE_CODES
a : Optional[Any] = ["text", "text", "text"]
a : List[Any] = ["text"]
def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
'''simple docstring'''
if src_lang not in self.lang_to_code:
raise ValueError(f"{src_lang} is not a supported language." )
if tgt_lang not in self.lang_to_code:
raise ValueError(f"{tgt_lang} is not a supported language." )
__lowercase = self.lang_to_code[src_lang]
__lowercase = self.lang_to_code[tgt_lang]
return self.pre_processor._build_translation_inputs(
_lowerCamelCase ,return_tensors='''pt''' ,src_lang=_lowerCamelCase ,tgt_lang=_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> Dict:
'''simple docstring'''
return self.model.generate(**_lowerCamelCase )
def _UpperCAmelCase (self ,_lowerCamelCase ) -> List[str]:
'''simple docstring'''
return self.post_processor.decode(outputs[0].tolist() ,skip_special_tokens=_lowerCamelCase )
| 217 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_SCREAMING_SNAKE_CASE = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_SCREAMING_SNAKE_CASE = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
_SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 217 | 1 |
from collections.abc import Iterable
from typing import Generic, TypeVar
lowercase__ : Tuple = TypeVar("_T")
class UpperCAmelCase ( Generic[_T] ):
'''simple docstring'''
def __init__( self : str , __lowercase : Iterable[_T] | None = None ):
"""simple docstring"""
snake_case_ = list(iterable or [] )
snake_case_ = []
def __len__( self : Optional[int] ):
"""simple docstring"""
return len(self._stacka ) + len(self._stacka )
def __repr__( self : Optional[Any] ):
"""simple docstring"""
return f"Queue({tuple(self._stacka[::-1] + self._stacka )})"
def snake_case__ ( self : Union[str, Any] , __lowercase : _T ):
"""simple docstring"""
self._stacka.append(__lowercase )
def snake_case__ ( self : str ):
"""simple docstring"""
snake_case_ = self._stacka.pop
snake_case_ = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError("Queue is empty" )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod()
| 187 |
class UpperCAmelCase :
'''simple docstring'''
def __init__( self : Dict ):
"""simple docstring"""
snake_case_ = {} # Mapping from char to TrieNode
snake_case_ = False
def snake_case__ ( self : Dict , __lowercase : list[str] ):
"""simple docstring"""
for word in words:
self.insert(__lowercase )
def snake_case__ ( self : List[str] , __lowercase : str ):
"""simple docstring"""
snake_case_ = self
for char in word:
if char not in curr.nodes:
snake_case_ = TrieNode()
snake_case_ = curr.nodes[char]
snake_case_ = True
def snake_case__ ( self : List[Any] , __lowercase : str ):
"""simple docstring"""
snake_case_ = self
for char in word:
if char not in curr.nodes:
return False
snake_case_ = curr.nodes[char]
return curr.is_leaf
def snake_case__ ( self : Optional[Any] , __lowercase : str ):
"""simple docstring"""
def _delete(__lowercase : TrieNode , __lowercase : str , __lowercase : int ) -> bool:
if index == len(__lowercase ):
# If word does not exist
if not curr.is_leaf:
return False
snake_case_ = False
return len(curr.nodes ) == 0
snake_case_ = word[index]
snake_case_ = curr.nodes.get(__lowercase )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
snake_case_ = _delete(__lowercase , __lowercase , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , __lowercase , 0 )
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
if node.is_leaf:
print(_A , end=" " )
for key, value in node.nodes.items():
print_words(_A , word + key )
def lowerCamelCase__ ( ):
'''simple docstring'''
snake_case_ = "banana bananas bandana band apple all beast".split()
snake_case_ = TrieNode()
root.insert_many(_A )
# print_words(root, "")
assert all(root.find(_A ) for word in words )
assert root.find("banana" )
assert not root.find("bandanas" )
assert not root.find("apps" )
assert root.find("apple" )
assert root.find("all" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def lowerCamelCase__ ( _A , _A ):
'''simple docstring'''
print(str(_A ) , "works!" if passes else "doesn't work :(" )
def lowerCamelCase__ ( ):
'''simple docstring'''
assert test_trie()
def lowerCamelCase__ ( ):
'''simple docstring'''
print_results("Testing trie functionality" , test_trie() )
if __name__ == "__main__":
main()
| 187 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from transformers import CLIPImageProcessor, CLIPVisionModel
from ...models import PriorTransformer
from ...pipelines import DiffusionPipeline
from ...schedulers import HeunDiscreteScheduler
from ...utils import (
BaseOutput,
is_accelerate_available,
logging,
randn_tensor,
replace_example_docstring,
)
from .renderer import ShapERenderer
snake_case__ : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case__ : Tuple = '\n Examples:\n ```py\n >>> from PIL import Image\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from diffusers.utils import export_to_gif, load_image\n\n >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")\n\n >>> repo = "openai/shap-e-img2img"\n >>> pipe = DiffusionPipeline.from_pretrained(repo, torch_dtype=torch.float16)\n >>> pipe = pipe.to(device)\n\n >>> guidance_scale = 3.0\n >>> image_url = "https://hf.co/datasets/diffusers/docs-images/resolve/main/shap-e/corgi.png"\n >>> image = load_image(image_url).convert("RGB")\n\n >>> images = pipe(\n ... image,\n ... guidance_scale=guidance_scale,\n ... num_inference_steps=64,\n ... frame_size=256,\n ... ).images\n\n >>> gif_path = export_to_gif(images[0], "corgi_3d.gif")\n ```\n'
@dataclass
class A_ ( _lowerCamelCase ):
"""simple docstring"""
lowerCAmelCase__ = 42
class A_ ( _lowerCamelCase ):
"""simple docstring"""
def __init__(self :Any , _UpperCamelCase :PriorTransformer , _UpperCamelCase :CLIPVisionModel , _UpperCamelCase :CLIPImageProcessor , _UpperCamelCase :HeunDiscreteScheduler , _UpperCamelCase :ShapERenderer , )-> Union[str, Any]:
super().__init__()
self.register_modules(
prior=_UpperCamelCase , image_encoder=_UpperCamelCase , image_processor=_UpperCamelCase , scheduler=_UpperCamelCase , renderer=_UpperCamelCase , )
def _lowerCAmelCase (self :Tuple , _UpperCamelCase :Any , _UpperCamelCase :str , _UpperCamelCase :List[Any] , _UpperCamelCase :List[Any] , _UpperCamelCase :int , _UpperCamelCase :Tuple )-> Optional[int]:
if latents is None:
__A = randn_tensor(_UpperCamelCase , generator=_UpperCamelCase , device=_UpperCamelCase , dtype=_UpperCamelCase )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
__A = latents.to(_UpperCamelCase )
__A = latents * scheduler.init_noise_sigma
return latents
def _lowerCAmelCase (self :str , _UpperCamelCase :Optional[Any]=0 )-> int:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('''Please install accelerate via `pip install accelerate`''' )
__A = torch.device(f"""cuda:{gpu_id}""" )
__A = [self.image_encoder, self.prior]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_UpperCamelCase , _UpperCamelCase )
@property
def _lowerCAmelCase (self :Any )-> int:
if self.device != torch.device('''meta''' ) or not hasattr(self.image_encoder , '''_hf_hook''' ):
return self.device
for module in self.image_encoder.modules():
if (
hasattr(_UpperCamelCase , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
def _lowerCAmelCase (self :Optional[Any] , _UpperCamelCase :str , _UpperCamelCase :Optional[Any] , _UpperCamelCase :int , _UpperCamelCase :Union[str, Any] , )-> Any:
if isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , torch.Tensor ):
__A = torch.cat(_UpperCamelCase , axis=0 ) if image[0].ndim == 4 else torch.stack(_UpperCamelCase , axis=0 )
if not isinstance(_UpperCamelCase , torch.Tensor ):
__A = self.image_processor(_UpperCamelCase , return_tensors='''pt''' ).pixel_values[0].unsqueeze(0 )
__A = image.to(dtype=self.image_encoder.dtype , device=_UpperCamelCase )
__A = self.image_encoder(_UpperCamelCase )['''last_hidden_state''']
__A = image_embeds[:, 1:, :].contiguous() # batch_size, dim, 256
__A = image_embeds.repeat_interleave(_UpperCamelCase , dim=0 )
if do_classifier_free_guidance:
__A = torch.zeros_like(_UpperCamelCase )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__A = torch.cat([negative_image_embeds, image_embeds] )
return image_embeds
@torch.no_grad()
@replace_example_docstring(_UpperCamelCase )
def __call__(self :Optional[int] , _UpperCamelCase :Union[PIL.Image.Image, List[PIL.Image.Image]] , _UpperCamelCase :int = 1 , _UpperCamelCase :int = 25 , _UpperCamelCase :Optional[Union[torch.Generator, List[torch.Generator]]] = None , _UpperCamelCase :Optional[torch.FloatTensor] = None , _UpperCamelCase :float = 4.0 , _UpperCamelCase :int = 64 , _UpperCamelCase :Optional[str] = "pil" , _UpperCamelCase :bool = True , )-> List[str]:
if isinstance(_UpperCamelCase , PIL.Image.Image ):
__A = 1
elif isinstance(_UpperCamelCase , torch.Tensor ):
__A = image.shape[0]
elif isinstance(_UpperCamelCase , _UpperCamelCase ) and isinstance(image[0] , (torch.Tensor, PIL.Image.Image) ):
__A = len(_UpperCamelCase )
else:
raise ValueError(
f"""`image` has to be of type `PIL.Image.Image`, `torch.Tensor`, `List[PIL.Image.Image]` or `List[torch.Tensor]` but is {type(_UpperCamelCase )}""" )
__A = self._execution_device
__A = batch_size * num_images_per_prompt
__A = guidance_scale > 1.0
__A = self._encode_image(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# prior
self.scheduler.set_timesteps(_UpperCamelCase , device=_UpperCamelCase )
__A = self.scheduler.timesteps
__A = self.prior.config.num_embeddings
__A = self.prior.config.embedding_dim
__A = self.prepare_latents(
(batch_size, num_embeddings * embedding_dim) , image_embeds.dtype , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , self.scheduler , )
# YiYi notes: for testing only to match ldm, we can directly create a latents with desired shape: batch_size, num_embeddings, embedding_dim
__A = latents.reshape(latents.shape[0] , _UpperCamelCase , _UpperCamelCase )
for i, t in enumerate(self.progress_bar(_UpperCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__A = self.scheduler.scale_model_input(_UpperCamelCase , _UpperCamelCase )
__A = self.prior(
_UpperCamelCase , timestep=_UpperCamelCase , proj_embedding=_UpperCamelCase , ).predicted_image_embedding
# remove the variance
__A , __A = noise_pred.split(
scaled_model_input.shape[2] , dim=2 ) # batch_size, num_embeddings, embedding_dim
if do_classifier_free_guidance is not None:
__A , __A = noise_pred.chunk(2 )
__A = noise_pred_uncond + guidance_scale * (noise_pred - noise_pred_uncond)
__A = self.scheduler.step(
_UpperCamelCase , timestep=_UpperCamelCase , sample=_UpperCamelCase , ).prev_sample
if output_type == "latent":
return ShapEPipelineOutput(images=_UpperCamelCase )
__A = []
for i, latent in enumerate(_UpperCamelCase ):
print()
__A = self.renderer.decode(
latent[None, :] , _UpperCamelCase , size=_UpperCamelCase , ray_batch_size=4096 , n_coarse_samples=64 , n_fine_samples=128 , )
images.append(_UpperCamelCase )
__A = torch.stack(_UpperCamelCase )
if output_type not in ["np", "pil"]:
raise ValueError(f"""Only the output types `pil` and `np` are supported not output_type={output_type}""" )
__A = images.cpu().numpy()
if output_type == "pil":
__A = [self.numpy_to_pil(_UpperCamelCase ) for image in images]
# Offload last model to CPU
if hasattr(self , '''final_offload_hook''' ) and self.final_offload_hook is not None:
self.final_offload_hook.offload()
if not return_dict:
return (images,)
return ShapEPipelineOutput(images=_UpperCamelCase )
| 356 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class A_ ( unittest.TestCase ):
@slow
def _lowerCAmelCase (self :Dict )-> Dict:
__A = AutoModelForSeqaSeqLM.from_pretrained('''google/mt5-small''' , return_dict=_UpperCamelCase ).to(_UpperCamelCase )
__A = AutoTokenizer.from_pretrained('''google/mt5-small''' )
__A = tokenizer('''Hello there''' , return_tensors='''pt''' ).input_ids
__A = tokenizer('''Hi I am''' , return_tensors='''pt''' ).input_ids
__A = model(input_ids.to(_UpperCamelCase ) , labels=labels.to(_UpperCamelCase ) ).loss
__A = -(labels.shape[-1] * loss.item())
__A = -8_4.9_1_2_7
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 250 | 0 |
'''simple docstring'''
def __lowerCamelCase ( A__ , A__ , A__ ) -> Any:
"""simple docstring"""
if n == 0:
return 1
elif n % 2 == 1:
return (binary_exponentiation(A__ , n - 1 , A__ ) * a) % mod
else:
UpperCamelCase = binary_exponentiation(A__ , n / 2 , A__ )
return (b * b) % mod
# a prime number
_lowerCamelCase : Dict = 701
_lowerCamelCase : Dict = 10_0000_0000
_lowerCamelCase : Tuple = 10
# using binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
| 28 | import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class lowerCamelCase (SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
"""simple docstring"""
lowerCamelCase__ = CpmAntTokenizer
lowerCamelCase__ = False
def __A ( self : List[str] ) -> str:
super().setUp()
SCREAMING_SNAKE_CASE_ = [
"<d>",
"</d>",
"<s>",
"</s>",
"</_>",
"<unk>",
"<pad>",
"</n>",
"我",
"是",
"C",
"P",
"M",
"A",
"n",
"t",
]
SCREAMING_SNAKE_CASE_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
@tooslow
def __A ( self : int ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ = CpmAntTokenizer.from_pretrained("openbmb/cpm-ant-10b" )
SCREAMING_SNAKE_CASE_ = "今天天气真好!"
SCREAMING_SNAKE_CASE_ = ["今天", "天气", "真", "好", "!"]
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(__magic_name__ )
self.assertListEqual(__magic_name__ , __magic_name__ )
SCREAMING_SNAKE_CASE_ = "今天天气真好!"
SCREAMING_SNAKE_CASE_ = [tokenizer.bos_token] + tokens
SCREAMING_SNAKE_CASE_ = [6, 9_802, 14_962, 2_082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__magic_name__ ) , __magic_name__ )
SCREAMING_SNAKE_CASE_ = tokenizer.decode(__magic_name__ )
self.assertEqual(__magic_name__ , __magic_name__ )
| 118 | 0 |
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
_lowerCamelCase : int = {
'''sample_size''': 3_2,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': 1_0_0_0,
'''block_out_channels''': [3_2, 6_4],
'''attention_head_dim''': 8,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
_lowerCamelCase : Dict = {
'''sample_size''': 6_4,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 3,
'''num_class_embeds''': 1_0_0_0,
'''block_out_channels''': [1_9_2, 1_9_2 * 2, 1_9_2 * 3, 1_9_2 * 4],
'''attention_head_dim''': 6_4,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''scale_shift''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
_lowerCamelCase : Tuple = {
'''sample_size''': 2_5_6,
'''in_channels''': 3,
'''out_channels''': 3,
'''layers_per_block''': 2,
'''num_class_embeds''': None,
'''block_out_channels''': [2_5_6, 2_5_6, 2_5_6 * 2, 2_5_6 * 2, 2_5_6 * 4, 2_5_6 * 4],
'''attention_head_dim''': 6_4,
'''down_block_types''': [
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''ResnetDownsampleBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
'''AttnDownBlock2D''',
],
'''up_block_types''': [
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''AttnUpBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
'''ResnetUpsampleBlock2D''',
],
'''resnet_time_scale_shift''': '''default''',
'''upsample_type''': '''resnet''',
'''downsample_type''': '''resnet''',
}
_lowerCamelCase : Dict = {
'''num_train_timesteps''': 4_0,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
_lowerCamelCase : Tuple = {
'''num_train_timesteps''': 2_0_1,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
_lowerCamelCase : Any = {
'''num_train_timesteps''': 1_5_1,
'''sigma_min''': 0.002,
'''sigma_max''': 80.0,
}
def _a ( SCREAMING_SNAKE_CASE__ : Any ) -> Dict:
'''simple docstring'''
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("boolean value expected" )
def _a ( SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : Optional[int] , SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Optional[int]=False ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = checkpoint[f'''{old_prefix}.in_layers.0.weight''']
SCREAMING_SNAKE_CASE__ : str = checkpoint[f'''{old_prefix}.in_layers.0.bias''']
SCREAMING_SNAKE_CASE__ : Dict = checkpoint[f'''{old_prefix}.in_layers.2.weight''']
SCREAMING_SNAKE_CASE__ : Dict = checkpoint[f'''{old_prefix}.in_layers.2.bias''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = checkpoint[f'''{old_prefix}.emb_layers.1.weight''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = checkpoint[f'''{old_prefix}.emb_layers.1.bias''']
SCREAMING_SNAKE_CASE__ : int = checkpoint[f'''{old_prefix}.out_layers.0.weight''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = checkpoint[f'''{old_prefix}.out_layers.0.bias''']
SCREAMING_SNAKE_CASE__ : List[Any] = checkpoint[f'''{old_prefix}.out_layers.3.weight''']
SCREAMING_SNAKE_CASE__ : Dict = checkpoint[f'''{old_prefix}.out_layers.3.bias''']
if has_skip:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = checkpoint[f'''{old_prefix}.skip_connection.weight''']
SCREAMING_SNAKE_CASE__ : Dict = checkpoint[f'''{old_prefix}.skip_connection.bias''']
return new_checkpoint
def _a ( SCREAMING_SNAKE_CASE__ : Any , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[Any] , SCREAMING_SNAKE_CASE__ : Optional[Any] , SCREAMING_SNAKE_CASE__ : List[str]=None ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[Any] = checkpoint[f'''{old_prefix}.qkv.weight'''].chunk(3 , dim=0 )
SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__ : Optional[Any] = checkpoint[f'''{old_prefix}.qkv.bias'''].chunk(3 , dim=0 )
SCREAMING_SNAKE_CASE__ : List[Any] = checkpoint[f'''{old_prefix}.norm.weight''']
SCREAMING_SNAKE_CASE__ : Optional[int] = checkpoint[f'''{old_prefix}.norm.bias''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = weight_q.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : str = bias_q.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : List[Any] = weight_k.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : int = bias_k.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = weight_v.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : Dict = bias_v.squeeze(-1 ).squeeze(-1 )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
checkpoint[f'''{old_prefix}.proj_out.weight'''].squeeze(-1 ).squeeze(-1 )
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = checkpoint[f'''{old_prefix}.proj_out.bias'''].squeeze(-1 ).squeeze(-1 )
return new_checkpoint
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.load(SCREAMING_SNAKE_CASE__ , map_location="cpu" )
SCREAMING_SNAKE_CASE__ : str = {}
SCREAMING_SNAKE_CASE__ : List[str] = checkpoint["time_embed.0.weight"]
SCREAMING_SNAKE_CASE__ : str = checkpoint["time_embed.0.bias"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = checkpoint["time_embed.2.weight"]
SCREAMING_SNAKE_CASE__ : Optional[int] = checkpoint["time_embed.2.bias"]
if unet_config["num_class_embeds"] is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = checkpoint["label_emb.weight"]
SCREAMING_SNAKE_CASE__ : Dict = checkpoint["input_blocks.0.0.weight"]
SCREAMING_SNAKE_CASE__ : List[Any] = checkpoint["input_blocks.0.0.bias"]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = unet_config["down_block_types"]
SCREAMING_SNAKE_CASE__ : List[Any] = unet_config["layers_per_block"]
SCREAMING_SNAKE_CASE__ : Dict = unet_config["attention_head_dim"]
SCREAMING_SNAKE_CASE__ : Any = unet_config["block_out_channels"]
SCREAMING_SNAKE_CASE__ : str = 1
SCREAMING_SNAKE_CASE__ : List[Any] = channels_list[0]
for i, layer_type in enumerate(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Any = channels_list[i]
SCREAMING_SNAKE_CASE__ : Optional[Any] = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f'''down_blocks.{i}.resnets.{j}'''
SCREAMING_SNAKE_CASE__ : int = f'''input_blocks.{current_layer}.0'''
SCREAMING_SNAKE_CASE__ : List[str] = True if j == 0 and downsample_block_has_skip else False
SCREAMING_SNAKE_CASE__ : Optional[Any] = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_skip=SCREAMING_SNAKE_CASE__ )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(SCREAMING_SNAKE_CASE__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = f'''down_blocks.{i}.resnets.{j}'''
SCREAMING_SNAKE_CASE__ : List[str] = f'''input_blocks.{current_layer}.0'''
SCREAMING_SNAKE_CASE__ : int = True if j == 0 and downsample_block_has_skip else False
SCREAMING_SNAKE_CASE__ : Any = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_skip=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[str] = f'''down_blocks.{i}.attentions.{j}'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f'''input_blocks.{current_layer}.1'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = convert_attention(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
current_layer += 1
if i != len(SCREAMING_SNAKE_CASE__ ) - 1:
SCREAMING_SNAKE_CASE__ : Optional[int] = f'''down_blocks.{i}.downsamplers.0'''
SCREAMING_SNAKE_CASE__ : int = f'''input_blocks.{current_layer}.0'''
SCREAMING_SNAKE_CASE__ : Tuple = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
current_layer += 1
SCREAMING_SNAKE_CASE__ : List[Any] = current_channels
# hardcoded the mid-block for now
SCREAMING_SNAKE_CASE__ : str = "mid_block.resnets.0"
SCREAMING_SNAKE_CASE__ : int = "middle_block.0"
SCREAMING_SNAKE_CASE__ : Any = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : List[Any] = "mid_block.attentions.0"
SCREAMING_SNAKE_CASE__ : Union[str, Any] = "middle_block.1"
SCREAMING_SNAKE_CASE__ : Any = convert_attention(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = "mid_block.resnets.1"
SCREAMING_SNAKE_CASE__ : str = "middle_block.2"
SCREAMING_SNAKE_CASE__ : List[str] = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : int = 0
SCREAMING_SNAKE_CASE__ : int = unet_config["up_block_types"]
for i, layer_type in enumerate(SCREAMING_SNAKE_CASE__ ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
SCREAMING_SNAKE_CASE__ : List[str] = f'''up_blocks.{i}.resnets.{j}'''
SCREAMING_SNAKE_CASE__ : List[Any] = f'''output_blocks.{current_layer}.0'''
SCREAMING_SNAKE_CASE__ : Optional[int] = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_skip=SCREAMING_SNAKE_CASE__ )
current_layer += 1
if i != len(SCREAMING_SNAKE_CASE__ ) - 1:
SCREAMING_SNAKE_CASE__ : Tuple = f'''up_blocks.{i}.upsamplers.0'''
SCREAMING_SNAKE_CASE__ : List[str] = f'''output_blocks.{current_layer-1}.1'''
SCREAMING_SNAKE_CASE__ : List[Any] = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f'''up_blocks.{i}.resnets.{j}'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f'''output_blocks.{current_layer}.0'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , has_skip=SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = f'''up_blocks.{i}.attentions.{j}'''
SCREAMING_SNAKE_CASE__ : Tuple = f'''output_blocks.{current_layer}.1'''
SCREAMING_SNAKE_CASE__ : Tuple = convert_attention(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
current_layer += 1
if i != len(SCREAMING_SNAKE_CASE__ ) - 1:
SCREAMING_SNAKE_CASE__ : int = f'''up_blocks.{i}.upsamplers.0'''
SCREAMING_SNAKE_CASE__ : Dict = f'''output_blocks.{current_layer-1}.2'''
SCREAMING_SNAKE_CASE__ : int = convert_resnet(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = checkpoint["out.0.weight"]
SCREAMING_SNAKE_CASE__ : Tuple = checkpoint["out.0.bias"]
SCREAMING_SNAKE_CASE__ : Any = checkpoint["out.2.weight"]
SCREAMING_SNAKE_CASE__ : List[Any] = checkpoint["out.2.bias"]
return new_checkpoint
if __name__ == "__main__":
_lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument('''--unet_path''', default=None, type=str, required=True, help='''Path to the unet.pt to convert.''')
parser.add_argument(
'''--dump_path''', default=None, type=str, required=True, help='''Path to output the converted UNet model.'''
)
parser.add_argument('''--class_cond''', default=True, type=str, help='''Whether the model is class-conditional.''')
_lowerCamelCase : List[Any] = parser.parse_args()
_lowerCamelCase : int = strabool(args.class_cond)
_lowerCamelCase : int = os.path.basename(args.unet_path)
print(f"Checkpoint: {ckpt_name}")
# Get U-Net config
if "imagenet64" in ckpt_name:
_lowerCamelCase : str = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_lowerCamelCase : Any = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
_lowerCamelCase : Any = TEST_UNET_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
if not args.class_cond:
_lowerCamelCase : Any = None
_lowerCamelCase : Tuple = con_pt_to_diffuser(args.unet_path, unet_config)
_lowerCamelCase : str = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
_lowerCamelCase : Optional[int] = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
_lowerCamelCase : Optional[int] = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
_lowerCamelCase : Optional[int] = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")
_lowerCamelCase : Union[str, Any] = CMStochasticIterativeScheduler(**scheduler_config)
_lowerCamelCase : List[str] = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 191 |
import requests
from bsa import BeautifulSoup
def _a ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : dict ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE__ , params=SCREAMING_SNAKE_CASE__ ).content , "html.parser" )
SCREAMING_SNAKE_CASE__ : str = soup.find("div" , attrs={"class": "gs_ri"} )
SCREAMING_SNAKE_CASE__ : int = div.find("div" , attrs={"class": "gs_fl"} ).find_all("a" )
return anchors[2].get_text()
if __name__ == "__main__":
_lowerCamelCase : Any = {
'''title''': (
'''Precisely geometry controlled microsupercapacitors for ultrahigh areal '''
'''capacitance, volumetric capacitance, and energy density'''
),
'''journal''': '''Chem. Mater.''',
'''volume''': 3_0,
'''pages''': '''3979-3990''',
'''year''': 2_0_1_8,
'''hl''': '''en''',
}
print(get_citation('''https://scholar.google.com/scholar_lookup''', params=params))
| 191 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_UpperCamelCase = {'''processing_wav2vec2_with_lm''': ['''Wav2Vec2ProcessorWithLM''']}
if TYPE_CHECKING:
from .processing_wavaveca_with_lm import WavaVecaProcessorWithLM
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 254 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_UpperCamelCase = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCamelCase = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
_UpperCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 254 | 1 |
'''simple docstring'''
def _A ( A__ = 4000000 ):
"""simple docstring"""
__lowercase = []
__lowercase , __lowercase = 0, 1
while b <= n:
if b % 2 == 0:
even_fibs.append(_lowercase )
__lowercase , __lowercase = b, a + b
return sum(_lowercase )
if __name__ == "__main__":
print(f'{solution() = }')
| 358 |
'''simple docstring'''
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
'The RoBERTa Model transformer with early exiting (DeeRoBERTa). ' , lowerCamelCase__ , )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[Any] = RobertaConfig
SCREAMING_SNAKE_CASE : Optional[Any] = 'roberta'
def __init__( self : List[Any] ,lowercase__ : Optional[Any] ):
super().__init__(lowercase__ )
__lowercase = RobertaEmbeddings(lowercase__ )
self.init_weights()
@add_start_docstrings(
'RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,\n also takes care of multi-layer training. ' , lowerCamelCase__ , )
class lowercase_ (lowerCamelCase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Any = RobertaConfig
SCREAMING_SNAKE_CASE : Any = 'roberta'
def __init__( self : Union[str, Any] ,lowercase__ : int ):
super().__init__(lowercase__ )
__lowercase = config.num_labels
__lowercase = config.num_hidden_layers
__lowercase = DeeRobertaModel(lowercase__ )
__lowercase = nn.Dropout(config.hidden_dropout_prob )
__lowercase = nn.Linear(config.hidden_size ,self.config.num_labels )
@add_start_docstrings_to_model_forward(lowercase__ )
def SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,lowercase__ : Optional[Any]=None ,lowercase__ : Union[str, Any]=None ,lowercase__ : Optional[int]=None ,lowercase__ : int=None ,lowercase__ : Dict=None ,lowercase__ : List[Any]=None ,lowercase__ : str=None ,lowercase__ : List[Any]=-1 ,lowercase__ : Tuple=False ,):
__lowercase = self.num_layers
try:
__lowercase = self.roberta(
lowercase__ ,attention_mask=lowercase__ ,token_type_ids=lowercase__ ,position_ids=lowercase__ ,head_mask=lowercase__ ,inputs_embeds=lowercase__ ,)
__lowercase = outputs[1]
__lowercase = self.dropout(lowercase__ )
__lowercase = self.classifier(lowercase__ )
__lowercase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
__lowercase = e.message
__lowercase = e.exit_layer
__lowercase = outputs[0]
if not self.training:
__lowercase = entropy(lowercase__ )
__lowercase = []
__lowercase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
__lowercase = MSELoss()
__lowercase = loss_fct(logits.view(-1 ) ,labels.view(-1 ) )
else:
__lowercase = CrossEntropyLoss()
__lowercase = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
# work with highway exits
__lowercase = []
for highway_exit in outputs[-1]:
__lowercase = highway_exit[0]
if not self.training:
highway_logits_all.append(lowercase__ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
__lowercase = MSELoss()
__lowercase = loss_fct(highway_logits.view(-1 ) ,labels.view(-1 ) )
else:
__lowercase = CrossEntropyLoss()
__lowercase = loss_fct(highway_logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
highway_losses.append(lowercase__ )
if train_highway:
__lowercase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
__lowercase = (loss,) + outputs
if not self.training:
__lowercase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
__lowercase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 52 | 0 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPanoramaPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, skip_mps
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase__( lowerCAmelCase_ , lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a :int = StableDiffusionPanoramaPipeline
a :List[str] = TEXT_TO_IMAGE_PARAMS
a :Dict = TEXT_TO_IMAGE_BATCH_PARAMS
a :Any = TEXT_TO_IMAGE_IMAGE_PARAMS
a :List[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
def _lowercase ( self : Union[str, Any] ) -> Any:
torch.manual_seed(0 )
lowercase_ = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=1 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=3_2 , )
lowercase_ = DDIMScheduler()
torch.manual_seed(0 )
lowercase_ = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
torch.manual_seed(0 )
lowercase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
lowercase_ = CLIPTextModel(_snake_case )
lowercase_ = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
lowercase_ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple=0 ) -> Union[str, Any]:
lowercase_ = torch.manual_seed(_snake_case )
lowercase_ = {
'prompt': 'a photo of the dolomites',
'generator': generator,
# Setting height and width to None to prevent OOMs on CPU.
'height': None,
'width': None,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def _lowercase ( self : Optional[Any] ) -> str:
lowercase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableDiffusionPanoramaPipeline(**_snake_case )
lowercase_ = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
lowercase_ = self.get_dummy_inputs(_snake_case )
lowercase_ = sd_pipe(**_snake_case ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowercase_ = np.array([0.61_86, 0.53_74, 0.49_15, 0.41_35, 0.41_14, 0.45_63, 0.51_28, 0.49_77, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : Tuple ) -> Any:
super().test_inference_batch_consistent(batch_sizes=[1, 2] )
def _lowercase ( self : Optional[Any] ) -> Any:
super().test_inference_batch_single_identical(batch_size=2 , expected_max_diff=3.25e-3 )
def _lowercase ( self : str ) -> List[str]:
lowercase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableDiffusionPanoramaPipeline(**_snake_case )
lowercase_ = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
lowercase_ = self.get_dummy_inputs(_snake_case )
lowercase_ = 'french fries'
lowercase_ = sd_pipe(**_snake_case , negative_prompt=_snake_case )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowercase_ = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : Optional[Any] ) -> Optional[Any]:
lowercase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = StableDiffusionPanoramaPipeline(**_snake_case )
lowercase_ = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
lowercase_ = self.get_dummy_inputs(_snake_case )
lowercase_ = sd_pipe(**_snake_case , view_batch_size=2 )
lowercase_ = output.images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowercase_ = np.array([0.61_87, 0.53_75, 0.49_15, 0.41_36, 0.41_14, 0.45_63, 0.51_28, 0.49_76, 0.47_57] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : Dict ) -> Any:
lowercase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' )
lowercase_ = StableDiffusionPanoramaPipeline(**_snake_case )
lowercase_ = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
lowercase_ = self.get_dummy_inputs(_snake_case )
lowercase_ = sd_pipe(**_snake_case ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowercase_ = np.array([0.40_24, 0.65_10, 0.49_01, 0.53_78, 0.58_13, 0.56_22, 0.47_95, 0.44_67, 0.49_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : Union[str, Any] ) -> Any:
lowercase_ = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowercase_ = self.get_dummy_components()
lowercase_ = PNDMScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='''scaled_linear''' , skip_prk_steps=_snake_case )
lowercase_ = StableDiffusionPanoramaPipeline(**_snake_case )
lowercase_ = sd_pipe.to(_snake_case )
sd_pipe.set_progress_bar_config(disable=_snake_case )
lowercase_ = self.get_dummy_inputs(_snake_case )
lowercase_ = sd_pipe(**_snake_case ).images
lowercase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowercase_ = np.array([0.63_91, 0.62_91, 0.48_61, 0.51_34, 0.55_52, 0.45_78, 0.50_32, 0.50_23, 0.45_39] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch_gpu
class lowercase__( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Dict ) -> List[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any=0 ) -> Any:
lowercase_ = torch.manual_seed(_snake_case )
lowercase_ = {
'prompt': 'a photo of the dolomites',
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _lowercase ( self : Any ) -> Optional[Any]:
lowercase_ = 'stabilityai/stable-diffusion-2-base'
lowercase_ = DDIMScheduler.from_pretrained(_snake_case , subfolder='''scheduler''' )
lowercase_ = StableDiffusionPanoramaPipeline.from_pretrained(_snake_case , scheduler=_snake_case , safety_checker=_snake_case )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
lowercase_ = self.get_inputs()
lowercase_ = pipe(**_snake_case ).images
lowercase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
lowercase_ = np.array(
[
0.36_96_83_92,
0.27_02_53_72,
0.32_44_67_66,
0.28_37_93_87,
0.36_36_32_74,
0.30_73_33_47,
0.27_10_00_27,
0.27_05_41_25,
0.25_53_60_96,
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-2
def _lowercase ( self : Optional[Any] ) -> str:
lowercase_ = StableDiffusionPanoramaPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-base''' , safety_checker=_snake_case )
lowercase_ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
lowercase_ = self.get_inputs()
lowercase_ = pipe(**_snake_case ).images
lowercase_ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 2_0_4_8, 3)
lowercase_ = np.array(
[
[
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self : int ) -> Any:
lowercase_ = 0
def callback_fn(SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : torch.FloatTensor ) -> None:
lowercase_ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
lowercase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 2_5_6)
lowercase_ = latents[0, -3:, -3:, -1]
lowercase_ = np.array(
[
0.18_68_18_69,
0.33_90_78_16,
0.5_36_12_76,
0.14_43_28_65,
-0.02_85_66_11,
-0.73_94_11_23,
0.23_39_79_87,
0.47_32_26_82,
-0.37_82_31_64,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
lowercase_ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 2_5_6)
lowercase_ = latents[0, -3:, -3:, -1]
lowercase_ = np.array(
[
0.18_53_96_45,
0.33_98_72_48,
0.5_37_85_59,
0.14_43_71_42,
-0.02_45_52_61,
-0.7_33_83_17,
0.23_99_07_55,
0.47_35_62_72,
-0.3_78_65_05,
] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
lowercase_ = False
lowercase_ = 'stabilityai/stable-diffusion-2-base'
lowercase_ = DDIMScheduler.from_pretrained(_snake_case , subfolder='''scheduler''' )
lowercase_ = StableDiffusionPanoramaPipeline.from_pretrained(_snake_case , scheduler=_snake_case , safety_checker=_snake_case )
lowercase_ = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing()
lowercase_ = self.get_inputs()
pipe(**_snake_case , callback=_snake_case , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _lowercase ( self : Optional[Any] ) -> Optional[int]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
lowercase_ = 'stabilityai/stable-diffusion-2-base'
lowercase_ = DDIMScheduler.from_pretrained(_snake_case , subfolder='''scheduler''' )
lowercase_ = StableDiffusionPanoramaPipeline.from_pretrained(_snake_case , scheduler=_snake_case , safety_checker=_snake_case )
lowercase_ = pipe.to(_snake_case )
pipe.set_progress_bar_config(disable=_snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
lowercase_ = self.get_inputs()
lowercase_ = pipe(**_snake_case )
lowercase_ = torch.cuda.max_memory_allocated()
# make sure that less than 5.2 GB is allocated
assert mem_bytes < 5.5 * 1_0**9
| 30 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ :Optional[int] = logging.get_logger(__name__)
a_ :Dict = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """openai-gpt"""
_SCREAMING_SNAKE_CASE = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Optional[int], _snake_case : Dict=4_0_4_7_8, _snake_case : str=5_1_2, _snake_case : int=7_6_8, _snake_case : Tuple=1_2, _snake_case : Any=1_2, _snake_case : str="gelu", _snake_case : List[str]=0.1, _snake_case : Any=0.1, _snake_case : Dict=0.1, _snake_case : int=1e-5, _snake_case : Optional[Any]=0.0_2, _snake_case : List[Any]="cls_index", _snake_case : Any=True, _snake_case : Any=None, _snake_case : int=True, _snake_case : Optional[Any]=0.1, **_snake_case : List[Any], ) ->Optional[int]:
snake_case__ : int = vocab_size
snake_case__ : Dict = n_positions
snake_case__ : str = n_embd
snake_case__ : str = n_layer
snake_case__ : List[Any] = n_head
snake_case__ : List[Any] = afn
snake_case__ : Optional[Any] = resid_pdrop
snake_case__ : List[str] = embd_pdrop
snake_case__ : List[Any] = attn_pdrop
snake_case__ : Optional[int] = layer_norm_epsilon
snake_case__ : str = initializer_range
snake_case__ : List[str] = summary_type
snake_case__ : Optional[int] = summary_use_proj
snake_case__ : List[str] = summary_activation
snake_case__ : Optional[Any] = summary_first_dropout
snake_case__ : int = summary_proj_to_labels
super().__init__(**_snake_case )
| 277 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
__lowerCamelCase : Dict = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[str] = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
__lowerCamelCase : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 353 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class __snake_case ( unittest.TestCase ):
def __a ( self : str ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 3
SCREAMING_SNAKE_CASE__ = (32, 32)
SCREAMING_SNAKE_CASE__ = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(_lowercase )
return image
@property
def __a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(
block_out_channels=(32, 32, 64) , layers_per_block=2 , sample_size=32 , in_channels=7 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , attention_head_dim=8 , use_linear_projection=_lowercase , only_cross_attention=(True, True, False) , num_class_embeds=1_00 , )
return model
@property
def __a ( self : Optional[int] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoencoderKL(
block_out_channels=[32, 32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
return model
@property
def __a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act="""gelu""" , projection_dim=5_12 , )
return CLIPTextModel(_lowercase )
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE__ = DDPMScheduler()
SCREAMING_SNAKE_CASE__ = DDIMScheduler(prediction_type="""v_prediction""" )
SCREAMING_SNAKE_CASE__ = self.dummy_vae
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ = StableDiffusionUpscalePipeline(
unet=_lowercase , low_res_scheduler=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , max_noise_level=3_50 , )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.Generator(device=_lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe(
[prompt] , image=_lowercase , generator=_lowercase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = torch.Generator(device=_lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe(
[prompt] , image=_lowercase , generator=_lowercase , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , return_dict=_lowercase , )[0]
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = image_from_tuple[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.31_13, 0.39_10, 0.42_72, 0.48_59, 0.50_61, 0.46_52, 0.53_62, 0.57_15, 0.56_61] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = """cpu""" # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE__ = DDPMScheduler()
SCREAMING_SNAKE_CASE__ = DDIMScheduler(prediction_type="""v_prediction""" )
SCREAMING_SNAKE_CASE__ = self.dummy_vae
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ = StableDiffusionUpscalePipeline(
unet=_lowercase , low_res_scheduler=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , max_noise_level=3_50 , )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images
assert image.shape[0] == 2
SCREAMING_SNAKE_CASE__ = torch.Generator(device=_lowercase ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe(
[prompt] , image=_lowercase , generator=_lowercase , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=20 , num_inference_steps=2 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" , """This test requires a GPU""" )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE__ = DDPMScheduler()
SCREAMING_SNAKE_CASE__ = DDIMScheduler(prediction_type="""v_prediction""" )
SCREAMING_SNAKE_CASE__ = self.dummy_vae
SCREAMING_SNAKE_CASE__ = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE__ = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uinta(_lowercase ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
SCREAMING_SNAKE_CASE__ = unet.half()
SCREAMING_SNAKE_CASE__ = text_encoder.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE__ = StableDiffusionUpscalePipeline(
unet=_lowercase , low_res_scheduler=_lowercase , scheduler=_lowercase , vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , max_noise_level=3_50 , )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(_lowercase )
sd_pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ = """A painting of a squirrel eating a burger"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = sd_pipe(
[prompt] , image=_lowercase , generator=_lowercase , num_inference_steps=2 , output_type="""np""" , ).images
SCREAMING_SNAKE_CASE__ = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def __a ( self : Optional[int] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
SCREAMING_SNAKE_CASE__ = """stabilityai/stable-diffusion-x4-upscaler"""
SCREAMING_SNAKE_CASE__ = StableDiffusionUpscalePipeline.from_pretrained(_lowercase )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = """a cat sitting on a park bench"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(
prompt=_lowercase , image=_lowercase , generator=_lowercase , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
SCREAMING_SNAKE_CASE__ = """stabilityai/stable-diffusion-x4-upscaler"""
SCREAMING_SNAKE_CASE__ = StableDiffusionUpscalePipeline.from_pretrained(
_lowercase , torch_dtype=torch.floataa , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = """a cat sitting on a park bench"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(
prompt=_lowercase , image=_lowercase , generator=_lowercase , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __a ( self : Any ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE__ = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
SCREAMING_SNAKE_CASE__ = """stabilityai/stable-diffusion-x4-upscaler"""
SCREAMING_SNAKE_CASE__ = StableDiffusionUpscalePipeline.from_pretrained(
_lowercase , torch_dtype=torch.floataa , )
pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ = """a cat sitting on a park bench"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe(
prompt=_lowercase , image=_lowercase , generator=_lowercase , num_inference_steps=5 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 204 | 0 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=_A )
class UpperCAmelCase_ ( _A ):
'''simple docstring'''
a__ = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
a__ = Features({"""text""": Value("""string""" )} )
a__ = Features({"""labels""": ClassLabel} )
a__ = "text"
a__ = "labels"
def _lowercase ( self : Tuple , UpperCamelCase__ : Dict ) -> Dict:
"""simple docstring"""
if self.label_column not in features:
raise ValueError(F'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , UpperCamelCase__ ):
raise ValueError(F'''Column {self.label_column} is not a ClassLabel.''' )
__magic_name__ = copy.deepcopy(self )
__magic_name__ = self.label_schema.copy()
__magic_name__ = features[self.label_column]
__magic_name__ = label_schema
return task_template
@property
def _lowercase ( self : Optional[int] ) -> Dict[str, str]:
"""simple docstring"""
return {
self.text_column: "text",
self.label_column: "labels",
}
| 88 |
from __future__ import annotations
lowercase__ :Any = 1.60_21E-19 # units = C
def UpperCamelCase ( lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , ):
'''simple docstring'''
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif conductivity < 0:
raise ValueError('''Conductivity cannot be negative''' )
elif electron_conc < 0:
raise ValueError('''Electron concentration cannot be negative''' )
elif mobility < 0:
raise ValueError('''mobility cannot be negative''' )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 101 | 0 |
"""simple docstring"""
from __future__ import annotations
class UpperCamelCase__:
def __init__( self ,__UpperCAmelCase ) -> None:
A__ = order
# a_{0} ... a_{k}
A__ = [1.0] + [0.0] * order
# b_{0} ... b_{k}
A__ = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
A__ = [0.0] * self.order
# y[n-1] ... y[n-k]
A__ = [0.0] * self.order
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> None:
if len(__UpperCAmelCase ) < self.order:
A__ = [1.0, *a_coeffs]
if len(__UpperCAmelCase ) != self.order + 1:
A__ = (
f'''Expected a_coeffs to have {self.order + 1} elements '''
f'''for {self.order}-order filter, got {len(__UpperCAmelCase )}'''
)
raise ValueError(__UpperCAmelCase )
if len(__UpperCAmelCase ) != self.order + 1:
A__ = (
f'''Expected b_coeffs to have {self.order + 1} elements '''
f'''for {self.order}-order filter, got {len(__UpperCAmelCase )}'''
)
raise ValueError(__UpperCAmelCase )
A__ = a_coeffs
A__ = b_coeffs
def snake_case__ ( self ,__UpperCAmelCase ) -> float:
A__ = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 ,self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
A__ = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
A__ = self.input_history[:-1]
A__ = self.output_history[:-1]
A__ = sample
A__ = result
return result
| 154 | """simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
__lowerCamelCase = 20_48
__lowerCamelCase = 40_96
__lowerCamelCase = 42
__lowerCamelCase = os.environ.pop("PROCESS_TRAIN", "false")
__lowerCamelCase = {"null": 0, "short": 1, "long": 2, "yes": 3, "no": 4}
def UpperCAmelCase ( UpperCamelCase__ ):
"""simple docstring"""
def choose_first(UpperCamelCase__ , UpperCamelCase__=False ):
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
if len(UpperCamelCase__ ) == 1:
A__ = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
A__ = {k: [a[k]] for k in a}
if len(a['start_token'] ) > 0:
break
return a
A__ = {'id': example['id']}
A__ = example['annotations']
A__ = annotation['yes_no_answer']
if 0 in yes_no_answer or 1 in yes_no_answer:
A__ = ['yes'] if 1 in yes_no_answer else ['no']
A__ = A__ = []
A__ = A__ = []
A__ = ['<cls>']
else:
A__ = ['short']
A__ = choose_first(annotation['short_answers'] )
if len(out['start_token'] ) == 0:
# answer will be long if short is not available
A__ = ['long']
A__ = choose_first(annotation['long_answer'] , is_long_answer=UpperCamelCase__ )
A__ = []
answer.update(UpperCamelCase__ )
# disregard some samples
if len(answer['start_token'] ) > 1 or answer["start_token"] == answer["end_token"]:
A__ = True
else:
A__ = False
A__ = ['start_token', 'end_token', 'start_byte', 'end_byte', 'text']
if not all(isinstance(answer[k] , UpperCamelCase__ ) for k in cols ):
raise ValueError('Issue in ID' , example['id'] )
return answer
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__=False ):
"""simple docstring"""
A__ = _get_single_answer(UpperCamelCase__ )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
A__ = example['document']['tokens']
A__ = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
return {
"context": " ".join(UpperCamelCase__ ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
A__ = ['start_token', 'end_token']
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
A__ = example['document']['tokens']
A__ = answer['start_token']
A__ = answer['end_token']
A__ = []
for i in range(len(doc['token'] ) ):
if not doc["is_html"][i]:
context.append(doc['token'][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
A__ = ' '.join(context[start_token:end_token] )
# checking above code
if assertion:
A__ = doc['is_html'][answer['start_token'] : answer['end_token']]
A__ = doc['token'][answer['start_token'] : answer['end_token']]
A__ = ' '.join([old[i] for i in range(len(UpperCamelCase__ ) ) if not is_html[i]] )
if new != old:
print('ID:' , example['id'] )
print('New:' , UpperCamelCase__ , end='\n' )
print('Old:' , UpperCamelCase__ , end='\n\n' )
return {
"context": " ".join(UpperCamelCase__ ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=2_048 , UpperCamelCase__=4_096 , UpperCamelCase__=True ):
"""simple docstring"""
A__ = get_context_and_ans(UpperCamelCase__ , assertion=UpperCamelCase__ )
A__ = out['answer']
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
A__ = tokenizer(example['question']['text'] , out['context'] ).input_ids
A__ = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
A__ = []
A__ = []
A__ = input_ids[:q_len]
A__ = range(UpperCamelCase__ , len(UpperCamelCase__ ) , max_length - doc_stride )
for i in doc_start_indices:
A__ = i + max_length - q_len
A__ = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['category'][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(UpperCamelCase__ ),
"end_token": [-100] * len(UpperCamelCase__ ),
"category": category,
},
}
A__ = out['context'].split()
A__ = splitted_context[answer['end_token']]
A__ = len(
tokenizer(
' '.join(splitted_context[: answer['start_token']] ) , add_special_tokens=UpperCamelCase__ , ).input_ids )
A__ = len(
tokenizer(' '.join(splitted_context[: answer['end_token']] ) , add_special_tokens=UpperCamelCase__ ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
A__ = len(tokenizer(UpperCamelCase__ , add_special_tokens=UpperCamelCase__ ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
A__ = input_ids[answer['start_token'] : answer['end_token'] + 1] # right & left are inclusive
A__ = answer['start_token']
A__ = answer['end_token']
if assertion:
A__ = tokenizer.decode(UpperCamelCase__ )
if answer["span"] != new:
print('ISSUE IN TOKENIZATION' )
print('OLD:' , answer['span'] )
print('NEW:' , UpperCamelCase__ , end='\n\n' )
if len(UpperCamelCase__ ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
A__ = input_ids[:q_len]
A__ = range(UpperCamelCase__ , len(UpperCamelCase__ ) , max_length - doc_stride )
A__ = []
A__ = []
A__ = []
A__ = [] # null, yes, no, long, short
for i in doc_start_indices:
A__ = i + max_length - q_len
A__ = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
A__ = start_token - i + q_len
A__ = end_token - i + q_len
answers_category.append(answer['category'][0] ) # ["short"] -> "short"
else:
A__ = -100
A__ = -100
answers_category.append('null' )
A__ = inputs[-1][start_token : end_token + 1]
answers_start_token.append(UpperCamelCase__ )
answers_end_token.append(UpperCamelCase__ )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('ISSUE in strided for ID:' , example['id'] )
print('New:' , tokenizer.decode(UpperCamelCase__ ) )
print('Old:' , tokenizer.decode(UpperCamelCase__ ) , end='\n\n' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=2_048 , UpperCamelCase__=4_096 , UpperCamelCase__=False ):
"""simple docstring"""
A__ = get_strided_contexts_and_ans(
UpperCamelCase__ , UpperCamelCase__ , doc_stride=UpperCamelCase__ , max_length=UpperCamelCase__ , assertion=UpperCamelCase__ , )
return example
def UpperCAmelCase ( UpperCamelCase__ , UpperCamelCase__ ):
"""simple docstring"""
with jsonlines.open(UpperCamelCase__ , 'a' ) as writer:
for example in tqdm(UpperCamelCase__ , total=len(UpperCamelCase__ ) , desc='Saving samples ... ' ):
A__ = example['labels']
for ids, start, end, cat in zip(
example['input_ids'] , labels['start_token'] , labels['end_token'] , labels['category'] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
'input_ids': ids,
'start_token': start,
'end_token': end,
'category': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
__lowerCamelCase = load_dataset("natural_questions")
__lowerCamelCase = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
__lowerCamelCase = data["train" if PROCESS_TRAIN == "true" else "validation"]
__lowerCamelCase = {
"tokenizer": tokenizer,
"doc_stride": DOC_STRIDE,
"max_length": MAX_LENGTH,
"assertion": False,
}
__lowerCamelCase = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
__lowerCamelCase = data.remove_columns(["annotations", "document", "id", "question"])
print(data)
np.random.seed(SEED)
__lowerCamelCase = "nq-training.jsonl" if PROCESS_TRAIN == "true" else "nq-validation.jsonl"
save_to_disk(data, file_name=cache_file_name)
| 154 | 1 |
import warnings
from diffusers import StableDiffusionImgaImgPipeline # noqa F401
warnings.warn(
'''The `image_to_image.py` script is outdated. Please use directly `from diffusers import'''
''' StableDiffusionImg2ImgPipeline` instead.'''
)
| 87 | from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
'''YituTech/conv-bert-base''': '''https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json''',
'''YituTech/conv-bert-medium-small''': (
'''https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'''
),
'''YituTech/conv-bert-small''': '''https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json''',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class snake_case_ ( __A ):
__A : List[str] = "convbert"
def __init__( self : Union[str, Any] , lowercase_ : str=3_05_22 , lowercase_ : Any=7_68 , lowercase_ : Tuple=12 , lowercase_ : List[str]=12 , lowercase_ : Optional[int]=30_72 , lowercase_ : Union[str, Any]="gelu" , lowercase_ : str=0.1 , lowercase_ : List[str]=0.1 , lowercase_ : Optional[Any]=5_12 , lowercase_ : Dict=2 , lowercase_ : Union[str, Any]=0.02 , lowercase_ : Optional[Any]=1E-12 , lowercase_ : Optional[int]=1 , lowercase_ : List[Any]=0 , lowercase_ : Optional[int]=2 , lowercase_ : str=7_68 , lowercase_ : Dict=2 , lowercase_ : Optional[Any]=9 , lowercase_ : Union[str, Any]=1 , lowercase_ : Any=None , **lowercase_ : Optional[Any] , ) -> Dict:
super().__init__(
pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ , )
lowercase__ : List[str] = vocab_size
lowercase__ : Union[str, Any] = hidden_size
lowercase__ : Any = num_hidden_layers
lowercase__ : List[str] = num_attention_heads
lowercase__ : Union[str, Any] = intermediate_size
lowercase__ : Optional[Any] = hidden_act
lowercase__ : int = hidden_dropout_prob
lowercase__ : str = attention_probs_dropout_prob
lowercase__ : Union[str, Any] = max_position_embeddings
lowercase__ : Optional[int] = type_vocab_size
lowercase__ : Tuple = initializer_range
lowercase__ : List[str] = layer_norm_eps
lowercase__ : List[Any] = embedding_size
lowercase__ : Optional[Any] = head_ratio
lowercase__ : Dict = conv_kernel_size
lowercase__ : Tuple = num_groups
lowercase__ : Optional[int] = classifier_dropout
class snake_case_ ( __A ):
@property
def __UpperCamelCase ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
lowercase__ : Tuple = {0: "batch", 1: "choice", 2: "sequence"}
else:
lowercase__ : str = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
| 87 | 1 |
from __future__ import annotations
snake_case : Optional[int] = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class _snake_case :
def __init__( self , _a , _a ):
__magic_name__ : Any = graph
# mapping node to its parent in resulting breadth first tree
__magic_name__ : dict[str, str | None] = {}
__magic_name__ : List[str] = source_vertex
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = {self.source_vertex}
__magic_name__ : Optional[int] = None
__magic_name__ : int = [self.source_vertex] # first in first out queue
while queue:
__magic_name__ : Optional[Any] = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(_a )
__magic_name__ : Dict = vertex
queue.append(_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
if target_vertex == self.source_vertex:
return self.source_vertex
__magic_name__ : str = self.parent.get(_a )
if target_vertex_parent is None:
__magic_name__ : Union[str, Any] = (
f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
)
raise ValueError(_a )
return self.shortest_path(_a ) + f'''->{target_vertex}'''
if __name__ == "__main__":
snake_case : int = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
| 41 |
from __future__ import annotations
snake_case : Optional[int] = {
"A": ["B", "C", "E"],
"B": ["A", "D", "E"],
"C": ["A", "F", "G"],
"D": ["B"],
"E": ["A", "B", "D"],
"F": ["C"],
"G": ["C"],
}
class _snake_case :
def __init__( self , _a , _a ):
__magic_name__ : Any = graph
# mapping node to its parent in resulting breadth first tree
__magic_name__ : dict[str, str | None] = {}
__magic_name__ : List[str] = source_vertex
def SCREAMING_SNAKE_CASE ( self ):
__magic_name__ : List[str] = {self.source_vertex}
__magic_name__ : Optional[int] = None
__magic_name__ : int = [self.source_vertex] # first in first out queue
while queue:
__magic_name__ : Optional[Any] = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(_a )
__magic_name__ : Dict = vertex
queue.append(_a )
def SCREAMING_SNAKE_CASE ( self , _a ):
if target_vertex == self.source_vertex:
return self.source_vertex
__magic_name__ : str = self.parent.get(_a )
if target_vertex_parent is None:
__magic_name__ : Union[str, Any] = (
f'''No path from vertex: {self.source_vertex} to vertex: {target_vertex}'''
)
raise ValueError(_a )
return self.shortest_path(_a ) + f'''->{target_vertex}'''
if __name__ == "__main__":
snake_case : int = Graph(graph, "G")
g.breath_first_search()
print(g.shortest_path("D"))
print(g.shortest_path("G"))
print(g.shortest_path("Foo"))
| 41 | 1 |
'''simple docstring'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Dict:
if len(_lowerCAmelCase ) <= 1:
return lst
lowerCamelCase__ : List[Any] = 1
while i < len(_lowerCAmelCase ):
if lst[i - 1] <= lst[i]:
i += 1
else:
lowerCamelCase__ , lowerCamelCase__ : Tuple = lst[i], lst[i - 1]
i -= 1
if i == 0:
lowerCamelCase__ : Tuple = 1
return lst
if __name__ == "__main__":
_A : Tuple =input('''Enter numbers separated by a comma:\n''').strip()
_A : List[Any] =[int(item) for item in user_input.split(''',''')]
print(gnome_sort(unsorted))
| 41 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _lowerCAmelCase (_lowerCAmelCase , _lowerCAmelCase):
UpperCamelCase_ = []
for part_id in partition_order:
UpperCamelCase_ = df.where(f"""SPARK_PARTITION_ID() = {part_id}""").collect()
for row_idx, row in enumerate(_lowerCAmelCase):
expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()))
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ():
UpperCamelCase_ = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
UpperCamelCase_ = spark.range(1_00).repartition(1)
UpperCamelCase_ = Spark(_lowerCAmelCase)
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16)
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ():
UpperCamelCase_ = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
UpperCamelCase_ = spark.range(10).repartition(2)
UpperCamelCase_ = [1, 0]
UpperCamelCase_ = _generate_iterable_examples(_lowerCAmelCase , _lowerCAmelCase) # Reverse the partitions.
UpperCamelCase_ = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCAmelCase , _lowerCAmelCase)
for i, (row_id, row_dict) in enumerate(generate_fn()):
UpperCamelCase_ , UpperCamelCase_ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ():
UpperCamelCase_ = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
UpperCamelCase_ = spark.range(10).repartition(1)
UpperCamelCase_ = SparkExamplesIterable(_lowerCAmelCase)
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(_lowerCAmelCase):
assert row_id == f"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ():
UpperCamelCase_ = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
UpperCamelCase_ = spark.range(30).repartition(3)
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator") as generator_mock:
UpperCamelCase_ = lambda _lowerCAmelCase: x.reverse()
UpperCamelCase_ = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCAmelCase , [2, 1, 0])
UpperCamelCase_ = SparkExamplesIterable(_lowerCAmelCase).shuffle_data_sources(_lowerCAmelCase)
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(_lowerCAmelCase):
UpperCamelCase_ , UpperCamelCase_ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ():
UpperCamelCase_ = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
UpperCamelCase_ = spark.range(20).repartition(4)
# Partitions 0 and 2
UpperCamelCase_ = SparkExamplesIterable(_lowerCAmelCase).shard_data_sources(worker_id=0 , num_workers=2)
assert shard_it_a.n_shards == 2
UpperCamelCase_ = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCAmelCase , [0, 2])
for i, (row_id, row_dict) in enumerate(_lowerCAmelCase):
UpperCamelCase_ , UpperCamelCase_ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
UpperCamelCase_ = SparkExamplesIterable(_lowerCAmelCase).shard_data_sources(worker_id=1 , num_workers=2)
assert shard_it_a.n_shards == 2
UpperCamelCase_ = _get_expected_row_ids_and_row_dicts_for_partition_order(_lowerCAmelCase , [1, 3])
for i, (row_id, row_dict) in enumerate(_lowerCAmelCase):
UpperCamelCase_ , UpperCamelCase_ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def _lowerCAmelCase ():
UpperCamelCase_ = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
UpperCamelCase_ = spark.range(1_00).repartition(1)
UpperCamelCase_ = Spark(_lowerCAmelCase)
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1)
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 1_00
| 128 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
lowerCAmelCase__ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ ( lowercase ):
"""simple docstring"""
def __init__( self , *snake_case__ , **snake_case__ ):
"""simple docstring"""
warnings.warn(
"The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
" use CLIPImageProcessor instead." , snake_case__ , )
super().__init__(*snake_case__ , **snake_case__ )
| 133 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : list[int] ):
'''simple docstring'''
lowerCAmelCase : str = len(SCREAMING_SNAKE_CASE )
for i in range(SCREAMING_SNAKE_CASE ):
for j in range(i + 1 , SCREAMING_SNAKE_CASE ):
if numbers[j] < numbers[i]:
lowerCAmelCase , lowerCAmelCase : Any = numbers[j], numbers[i]
return numbers
if __name__ == "__main__":
lowerCAmelCase__ = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase__ = [int(item) for item in user_input.split(''',''')]
print(exchange_sort(unsorted))
| 133 | 1 |
import pyarrow.parquet as pq
import pytest
from datasets import Audio, Dataset, DatasetDict, Features, NamedSplit, Sequence, Value, config
from datasets.features.image import Image
from datasets.io.parquet import ParquetDatasetReader, ParquetDatasetWriter, get_writer_batch_size
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Any:
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
__lowerCamelCase : str = tmp_path / 'cache'
__lowerCamelCase : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCamelCase : Tuple = ParquetDatasetReader(lowerCamelCase__ , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ ).read()
_check_parquet_dataset(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Any:
__lowerCamelCase : Union[str, Any] = tmp_path / 'cache'
__lowerCamelCase : Dict = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowerCamelCase : Union[str, Any] = features.copy() if features else default_expected_features
__lowerCamelCase : Union[str, Any] = (
Features({feature: Value(lowerCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCamelCase : Dict = ParquetDatasetReader(lowerCamelCase__ , features=lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_parquet_dataset(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> str:
__lowerCamelCase : Union[str, Any] = tmp_path / 'cache'
__lowerCamelCase : Optional[int] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowerCamelCase : Tuple = ParquetDatasetReader(lowerCamelCase__ , cache_dir=lowerCamelCase__ , split=lowerCamelCase__ ).read()
_check_parquet_dataset(lowerCamelCase__ , lowerCamelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
if issubclass(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase : Optional[int] = parquet_path
elif issubclass(lowerCamelCase__ , lowerCamelCase__ ):
__lowerCamelCase : Dict = [parquet_path]
__lowerCamelCase : List[Any] = tmp_path / 'cache'
__lowerCamelCase : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowerCamelCase : Dict = ParquetDatasetReader(lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_parquet_dataset(lowerCamelCase__ , lowerCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__=("train",) ) -> List[str]:
assert isinstance(lowerCamelCase__ , lowerCamelCase__ )
for split in splits:
__lowerCamelCase : Dict = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Optional[Any]:
__lowerCamelCase : Union[str, Any] = tmp_path / 'cache'
__lowerCamelCase : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__lowerCamelCase : List[str] = ParquetDatasetReader(
{'train': parquet_path} , cache_dir=lowerCamelCase__ , keep_in_memory=lowerCamelCase__ ).read()
_check_parquet_datasetdict(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
__lowerCamelCase : Tuple = tmp_path / 'cache'
__lowerCamelCase : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowerCamelCase : List[str] = features.copy() if features else default_expected_features
__lowerCamelCase : str = (
Features({feature: Value(lowerCamelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__lowerCamelCase : Union[str, Any] = ParquetDatasetReader({'train': parquet_path} , features=lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_parquet_datasetdict(lowerCamelCase__ , lowerCamelCase__ )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
if split:
__lowerCamelCase : List[str] = {split: parquet_path}
else:
__lowerCamelCase : List[str] = 'train'
__lowerCamelCase : List[Any] = {'train': parquet_path, 'test': parquet_path}
__lowerCamelCase : Tuple = tmp_path / 'cache'
__lowerCamelCase : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
__lowerCamelCase : Optional[Any] = ParquetDatasetReader(lowerCamelCase__ , cache_dir=lowerCamelCase__ ).read()
_check_parquet_datasetdict(lowerCamelCase__ , lowerCamelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
__lowerCamelCase : Union[str, Any] = ParquetDatasetWriter(lowerCamelCase__ , tmp_path / 'foo.parquet' )
assert writer.write() > 0
__lowerCamelCase : Union[str, Any] = pq.ParquetFile(tmp_path / 'foo.parquet' )
__lowerCamelCase : List[str] = pf.read()
assert dataset.data.table == output_table
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[Any]:
__lowerCamelCase : List[str] = str(shared_datadir / 'test_image_rgb.jpg' )
__lowerCamelCase : Optional[int] = {'image': [image_path]}
__lowerCamelCase : int = Features({'image': Image()} )
__lowerCamelCase : Union[str, Any] = Dataset.from_dict(lowerCamelCase__ , features=lowerCamelCase__ )
__lowerCamelCase : Any = ParquetDatasetWriter(lowerCamelCase__ , tmp_path / 'foo.parquet' )
assert writer.write() > 0
__lowerCamelCase : int = Dataset.from_parquet(str(tmp_path / 'foo.parquet' ) )
assert dataset.features == reloaded_dataset.features
__lowerCamelCase : Dict = ParquetDatasetReader(str(tmp_path / 'foo.parquet' ) , streaming=lowerCamelCase__ ).read()
assert dataset.features == reloaded_iterable_dataset.features
@pytest.mark.parametrize(
'feature, expected' , [
(Features({'foo': Value('int32' )} ), None),
(Features({'image': Image(), 'foo': Value('int32' )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS),
(Features({'nested': Sequence(Audio() )} ), config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS),
] , )
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ , lowerCamelCase__ ) -> List[str]:
assert get_writer_batch_size(lowerCamelCase__ ) == expected
| 73 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a ( _lowercase):
_a : Union[str, Any] = ['''image_processor''', '''tokenizer''']
_a : List[Any] = '''ChineseCLIPImageProcessor'''
_a : List[Any] = ('''BertTokenizer''', '''BertTokenizerFast''')
def __init__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : List[str]=None , _SCREAMING_SNAKE_CASE : Dict=None , **_SCREAMING_SNAKE_CASE : Optional[Any] )-> List[str]:
lowerCAmelCase__ : Dict = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : Union[str, Any] = kwargs.pop('''feature_extractor''' )
lowerCAmelCase__ : Optional[int] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Dict = self.image_processor
def __call__( self : Tuple , _SCREAMING_SNAKE_CASE : Optional[Any]=None , _SCREAMING_SNAKE_CASE : Optional[int]=None , _SCREAMING_SNAKE_CASE : List[Any]=None , **_SCREAMING_SNAKE_CASE : Dict )-> List[Any]:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
lowerCAmelCase__ : List[Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if images is not None:
lowerCAmelCase__ : Dict = self.image_processor(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None and images is not None:
lowerCAmelCase__ : Optional[Any] = image_features.pixel_values
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**_SCREAMING_SNAKE_CASE ) , tensor_type=_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Dict , *_SCREAMING_SNAKE_CASE : int , **_SCREAMING_SNAKE_CASE : Optional[int] )-> Any:
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : str , *_SCREAMING_SNAKE_CASE : Tuple , **_SCREAMING_SNAKE_CASE : Any )-> int:
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@property
def UpperCAmelCase__( self : Union[str, Any] )-> Union[str, Any]:
lowerCAmelCase__ : Any = self.tokenizer.model_input_names
lowerCAmelCase__ : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def UpperCAmelCase__( self : str )-> List[str]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
| 131 | 0 |
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowerCAmelCase__ :
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( *lowercase_ : List[str] , **lowercase_ : str):
'''simple docstring'''
pass
@is_pipeline_test
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@require_torch
def _SCREAMING_SNAKE_CASE ( self : List[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Any = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , )
SCREAMING_SNAKE_CASE_ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_classifier(lowercase_ , candidate_labels=['''a''', '''b''', '''c'''])
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(lowercase_) , [
[{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''b'''}, {'''score''': 0.3_33, '''label''': '''c'''}],
[{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''c'''}, {'''score''': 0.3_33, '''label''': '''b'''}],
] , )
SCREAMING_SNAKE_CASE_ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2)
self.assertEqual(
nested_simplify(lowercase_) , [
[
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
],
[
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
],
[
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
],
[
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
],
[
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
],
] , )
@require_tf
def _SCREAMING_SNAKE_CASE ( self : Any):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : str = pipeline(
model='''hf-internal-testing/tiny-random-clip-zero-shot-image-classification''' , framework='''tf''')
SCREAMING_SNAKE_CASE_ : Any = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
SCREAMING_SNAKE_CASE_ : int = image_classifier(lowercase_ , candidate_labels=['''a''', '''b''', '''c'''])
self.assertEqual(
nested_simplify(lowercase_) , [{'''score''': 0.3_33, '''label''': '''a'''}, {'''score''': 0.3_33, '''label''': '''b'''}, {'''score''': 0.3_33, '''label''': '''c'''}] , )
SCREAMING_SNAKE_CASE_ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''A''', '''B''', '''C'''] , batch_size=2)
self.assertEqual(
nested_simplify(lowercase_) , [
[
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
],
[
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
],
[
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
],
[
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
],
[
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
{'''score''': 0.3_33, '''label''': ANY(lowercase_)},
],
] , )
@slow
@require_torch
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : Optional[Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , )
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE_ : Tuple = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
SCREAMING_SNAKE_CASE_ : int = image_classifier(lowercase_ , candidate_labels=['''cat''', '''plane''', '''remote'''])
self.assertEqual(
nested_simplify(lowercase_) , [
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2)
self.assertEqual(
nested_simplify(lowercase_) , [
[
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
],
]
* 5 , )
@slow
@require_tf
def _SCREAMING_SNAKE_CASE ( self : Optional[Any]):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = pipeline(
task='''zero-shot-image-classification''' , model='''openai/clip-vit-base-patch32''' , framework='''tf''')
# This is an image of 2 cats with remotes and no planes
SCREAMING_SNAKE_CASE_ : List[str] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_classifier(lowercase_ , candidate_labels=['''cat''', '''plane''', '''remote'''])
self.assertEqual(
nested_simplify(lowercase_) , [
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
] , )
SCREAMING_SNAKE_CASE_ : List[Any] = image_classifier([image] * 5 , candidate_labels=['''cat''', '''plane''', '''remote'''] , batch_size=2)
self.assertEqual(
nested_simplify(lowercase_) , [
[
{'''score''': 0.5_11, '''label''': '''remote'''},
{'''score''': 0.4_85, '''label''': '''cat'''},
{'''score''': 0.0_04, '''label''': '''plane'''},
],
]
* 5 , )
| 318 |
"""simple docstring"""
import argparse
from pathlib import Path
from typing import Dict, OrderedDict, Tuple
import torch
from audiocraft.models import MusicGen
from transformers import (
AutoFeatureExtractor,
AutoTokenizer,
EncodecModel,
MusicgenDecoderConfig,
MusicgenForConditionalGeneration,
MusicgenProcessor,
TaEncoderModel,
)
from transformers.models.musicgen.modeling_musicgen import MusicgenForCausalLM
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase_ : str = logging.get_logger(__name__)
UpperCAmelCase_ : Optional[Any] = ["""model.decoder.embed_positions.weights"""]
def _A (__a ) -> Dict:
"""simple docstring"""
if "emb" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''emb''' , '''model.decoder.embed_tokens''' )
if "transformer" in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''transformer''' , '''model.decoder''' )
if "cross_attention" in name:
SCREAMING_SNAKE_CASE_ : str = name.replace('''cross_attention''' , '''encoder_attn''' )
if "linear1" in name:
SCREAMING_SNAKE_CASE_ : Optional[int] = name.replace('''linear1''' , '''fc1''' )
if "linear2" in name:
SCREAMING_SNAKE_CASE_ : str = name.replace('''linear2''' , '''fc2''' )
if "norm1" in name:
SCREAMING_SNAKE_CASE_ : Any = name.replace('''norm1''' , '''self_attn_layer_norm''' )
if "norm_cross" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''norm_cross''' , '''encoder_attn_layer_norm''' )
if "norm2" in name:
SCREAMING_SNAKE_CASE_ : Tuple = name.replace('''norm2''' , '''final_layer_norm''' )
if "out_norm" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''out_norm''' , '''model.decoder.layer_norm''' )
if "linears" in name:
SCREAMING_SNAKE_CASE_ : Dict = name.replace('''linears''' , '''lm_heads''' )
if "condition_provider.conditioners.description.output_proj" in name:
SCREAMING_SNAKE_CASE_ : List[str] = name.replace('''condition_provider.conditioners.description.output_proj''' , '''enc_to_dec_proj''' )
return name
def _A (__a , __a ) -> Tuple[Dict, Dict]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = list(state_dict.keys() )
SCREAMING_SNAKE_CASE_ : int = {}
for key in keys:
SCREAMING_SNAKE_CASE_ : int = state_dict.pop(__a )
SCREAMING_SNAKE_CASE_ : int = rename_keys(__a )
if "in_proj_weight" in key:
# split fused qkv proj
SCREAMING_SNAKE_CASE_ : List[str] = val[:hidden_size, :]
SCREAMING_SNAKE_CASE_ : List[str] = val[hidden_size : 2 * hidden_size, :]
SCREAMING_SNAKE_CASE_ : Optional[Any] = val[-hidden_size:, :]
elif "enc_to_dec_proj" in key:
SCREAMING_SNAKE_CASE_ : int = val
else:
SCREAMING_SNAKE_CASE_ : Any = val
return state_dict, enc_dec_proj_state_dict
def _A (__a ) -> MusicgenDecoderConfig:
"""simple docstring"""
if checkpoint == "small":
# default config values
SCREAMING_SNAKE_CASE_ : Optional[int] = 10_24
SCREAMING_SNAKE_CASE_ : Tuple = 24
SCREAMING_SNAKE_CASE_ : Optional[Any] = 16
elif checkpoint == "medium":
SCREAMING_SNAKE_CASE_ : List[str] = 15_36
SCREAMING_SNAKE_CASE_ : Optional[int] = 48
SCREAMING_SNAKE_CASE_ : Optional[int] = 24
elif checkpoint == "large":
SCREAMING_SNAKE_CASE_ : Optional[Any] = 20_48
SCREAMING_SNAKE_CASE_ : Optional[int] = 48
SCREAMING_SNAKE_CASE_ : int = 32
else:
raise ValueError(f'Checkpoint should be one of `[\'small\', \'medium\', \'large\']`, got {checkpoint}.' )
SCREAMING_SNAKE_CASE_ : List[Any] = MusicgenDecoderConfig(
hidden_size=__a , ffn_dim=hidden_size * 4 , num_hidden_layers=__a , num_attention_heads=__a , )
return config
@torch.no_grad()
def _A (__a , __a=None , __a=None , __a="cpu" ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Any = MusicGen.get_pretrained(__a , device=__a )
SCREAMING_SNAKE_CASE_ : Dict = decoder_config_from_checkpoint(__a )
SCREAMING_SNAKE_CASE_ : Optional[Any] = fairseq_model.lm.state_dict()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : int = rename_state_dict(
__a , hidden_size=decoder_config.hidden_size )
SCREAMING_SNAKE_CASE_ : Optional[Any] = TaEncoderModel.from_pretrained('''t5-base''' )
SCREAMING_SNAKE_CASE_ : List[str] = EncodecModel.from_pretrained('''facebook/encodec_32khz''' )
SCREAMING_SNAKE_CASE_ : int = MusicgenForCausalLM(__a ).eval()
# load all decoder weights - expect that we'll be missing embeddings and enc-dec projection
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : List[str] = decoder.load_state_dict(__a , strict=__a )
for key in missing_keys.copy():
if key.startswith(('''text_encoder''', '''audio_encoder''') ) or key in EXPECTED_MISSING_KEYS:
missing_keys.remove(__a )
if len(__a ) > 0:
raise ValueError(f'Missing key(s) in state_dict: {missing_keys}' )
if len(__a ) > 0:
raise ValueError(f'Unexpected key(s) in state_dict: {unexpected_keys}' )
# init the composite model
SCREAMING_SNAKE_CASE_ : str = MusicgenForConditionalGeneration(text_encoder=__a , audio_encoder=__a , decoder=__a )
# load the pre-trained enc-dec projection (from the decoder state dict)
model.enc_to_dec_proj.load_state_dict(__a )
# check we can do a forward pass
SCREAMING_SNAKE_CASE_ : Dict = torch.arange(0 , 8 , dtype=torch.long ).reshape(2 , -1 )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = input_ids.reshape(2 * 4 , -1 )
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : List[Any] = model(input_ids=__a , decoder_input_ids=__a ).logits
if logits.shape != (8, 1, 20_48):
raise ValueError('''Incorrect shape for logits''' )
# now construct the processor
SCREAMING_SNAKE_CASE_ : str = AutoTokenizer.from_pretrained('''t5-base''' )
SCREAMING_SNAKE_CASE_ : str = AutoFeatureExtractor.from_pretrained('''facebook/encodec_32khz''' , padding_side='''left''' )
SCREAMING_SNAKE_CASE_ : Tuple = MusicgenProcessor(feature_extractor=__a , tokenizer=__a )
# set the appropriate bos/pad token ids
SCREAMING_SNAKE_CASE_ : str = 20_48
SCREAMING_SNAKE_CASE_ : List[Any] = 20_48
# set other default generation config params
SCREAMING_SNAKE_CASE_ : int = int(30 * audio_encoder.config.frame_rate )
SCREAMING_SNAKE_CASE_ : str = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = 3.0
if pytorch_dump_folder is not None:
Path(__a ).mkdir(exist_ok=__a )
logger.info(f'Saving model {checkpoint} to {pytorch_dump_folder}' )
model.save_pretrained(__a )
processor.save_pretrained(__a )
if repo_id:
logger.info(f'Pushing model {checkpoint} to {repo_id}' )
model.push_to_hub(__a )
processor.push_to_hub(__a )
if __name__ == "__main__":
UpperCAmelCase_ : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--checkpoint""",
default="""small""",
type=str,
help="""Checkpoint size of the MusicGen model you'd like to convert. Can be one of: `['small', 'medium', 'large']`.""",
)
parser.add_argument(
"""--pytorch_dump_folder""",
required=True,
default=None,
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument(
"""--push_to_hub""", default=None, type=str, help="""Where to upload the converted model on the 🤗 hub."""
)
parser.add_argument(
"""--device""", default="""cpu""", type=str, help="""Torch device to run the conversion, either cpu or cuda."""
)
UpperCAmelCase_ : Dict = parser.parse_args()
convert_musicgen_checkpoint(args.checkpoint, args.pytorch_dump_folder, args.push_to_hub)
| 318 | 1 |
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
A_ :str = trt.Logger(trt.Logger.WARNING)
A_ :List[Any] = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
A_ :Tuple = logging.getLogger(__name__)
A_ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--onnx_model_path''',
default=None,
type=str,
required=True,
help='''Path to ONNX model: ''',
)
parser.add_argument(
'''--output_dir''',
default=None,
type=str,
required=True,
help='''The output directory where the model checkpoints and predictions will be written.''',
)
# Other parameters
parser.add_argument(
'''--tokenizer_name''',
default='''''',
type=str,
required=True,
help='''Pretrained tokenizer name or path if not the same as model_name''',
)
parser.add_argument(
'''--version_2_with_negative''',
action='''store_true''',
help='''If true, the SQuAD examples contain some that do not have an answer.''',
)
parser.add_argument(
'''--null_score_diff_threshold''',
type=float,
default=0.0,
help='''If null_score - best_non_null is greater than the threshold predict null.''',
)
parser.add_argument(
'''--max_seq_length''',
default=384,
type=int,
help=(
'''The maximum total input sequence length after WordPiece tokenization. Sequences '''
'''longer than this will be truncated, and sequences shorter than this will be padded.'''
),
)
parser.add_argument(
'''--doc_stride''',
default=128,
type=int,
help='''When splitting up a long document into chunks, how much stride to take between chunks.''',
)
parser.add_argument('''--per_device_eval_batch_size''', default=8, type=int, help='''Batch size per GPU/CPU for evaluation.''')
parser.add_argument(
'''--n_best_size''',
default=20,
type=int,
help='''The total number of n-best predictions to generate in the nbest_predictions.json output file.''',
)
parser.add_argument(
'''--max_answer_length''',
default=30,
type=int,
help=(
'''The maximum length of an answer that can be generated. This is needed because the start '''
'''and end predictions are not conditioned on one another.'''
),
)
parser.add_argument('''--seed''', type=int, default=42, help='''random seed for initialization''')
parser.add_argument(
'''--dataset_name''',
type=str,
default=None,
required=True,
help='''The name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--dataset_config_name''',
type=str,
default=None,
help='''The configuration name of the dataset to use (via the datasets library).''',
)
parser.add_argument(
'''--preprocessing_num_workers''', type=int, default=4, help='''A csv or a json file containing the training data.'''
)
parser.add_argument('''--overwrite_cache''', action='''store_true''', help='''Overwrite the cached training and evaluation sets''')
parser.add_argument(
'''--fp16''',
action='''store_true''',
help='''Whether to use 16-bit (mixed) precision instead of 32-bit''',
)
parser.add_argument(
'''--int8''',
action='''store_true''',
help='''Whether to use INT8''',
)
A_ :Optional[Any] = parser.parse_args()
if args.tokenizer_name:
A_ :int = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'''You are instantiating a new tokenizer from scratch. This is not supported by this script.'''
'''You can do it from another script, save it, and load it from here, using --tokenizer_name.'''
)
logger.info('''Training/evaluation parameters %s''', args)
A_ :Dict = args.per_device_eval_batch_size
A_ :Optional[int] = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
A_ :Optional[int] = True
A_ :Tuple = """temp_engine/bert-fp32.engine"""
if args.fpaa:
A_ :Union[str, Any] = """temp_engine/bert-fp16.engine"""
if args.inta:
A_ :List[str] = """temp_engine/bert-int8.engine"""
# import ONNX file
if not os.path.exists('''temp_engine'''):
os.makedirs('''temp_engine''')
A_ :Dict = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, '''rb''') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
A_ :Union[str, Any] = [network.get_input(i) for i in range(network.num_inputs)]
A_ :Any = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
A_ :Tuple = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
A_ :List[Any] = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
A_ :Union[str, Any] = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, '''wb''') as f:
f.write(engine.serialize())
def A ( a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ,a_ ) -> Optional[int]:
__UpperCamelCase : Union[str, Any] =np.asarray(inputs['input_ids'] ,dtype=np.intaa )
__UpperCamelCase : Union[str, Any] =np.asarray(inputs['attention_mask'] ,dtype=np.intaa )
__UpperCamelCase : List[str] =np.asarray(inputs['token_type_ids'] ,dtype=np.intaa )
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] ,input_ids.ravel() ,_A )
cuda.memcpy_htod_async(d_inputs[1] ,attention_mask.ravel() ,_A )
cuda.memcpy_htod_async(d_inputs[2] ,token_type_ids.ravel() ,_A )
# start time
__UpperCamelCase : Any =time.time()
# Run inference
context.execute_async(
bindings=[int(_A ) for d_inp in d_inputs] + [int(_A ), int(_A )] ,stream_handle=stream.handle )
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(_A ,_A ,_A )
cuda.memcpy_dtoh_async(_A ,_A ,_A )
# Synchronize the stream and take time
stream.synchronize()
# end time
__UpperCamelCase : str =time.time()
__UpperCamelCase : int =end_time - start_time
__UpperCamelCase : Tuple =(h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
A_ :int = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''',
datefmt='''%m/%d/%Y %H:%M:%S''',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
A_ :Optional[Any] = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('''Evaluation requires a dataset name''')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
A_ :Dict = raw_datasets["""validation"""].column_names
A_ :List[str] = """question""" if """question""" in column_names else column_names[0]
A_ :Any = """context""" if """context""" in column_names else column_names[1]
A_ :Optional[Any] = """answers""" if """answers""" in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
A_ :Any = tokenizer.padding_side == """right"""
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
A_ :Optional[int] = min(args.max_seq_length, tokenizer.model_max_length)
def A ( a_ ) -> int:
__UpperCamelCase : List[Any] =[q.lstrip() for q in examples[question_column_name]]
# Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
# in one example possible giving several features when a context is long, each of those features having a
# context that overlaps a bit the context of the previous feature.
__UpperCamelCase : Any =tokenizer(
examples[question_column_name if pad_on_right else context_column_name] ,examples[context_column_name if pad_on_right else question_column_name] ,truncation='only_second' if pad_on_right else 'only_first' ,max_length=_A ,stride=args.doc_stride ,return_overflowing_tokens=_A ,return_offsets_mapping=_A ,padding='max_length' ,)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
__UpperCamelCase : Union[str, Any] =tokenized_examples.pop('overflow_to_sample_mapping' )
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
__UpperCamelCase : Tuple =[]
for i in range(len(tokenized_examples['input_ids'] ) ):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
__UpperCamelCase : Optional[int] =tokenized_examples.sequence_ids(_A )
__UpperCamelCase : List[str] =1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
__UpperCamelCase : List[str] =sample_mapping[i]
tokenized_examples["example_id"].append(examples['id'][sample_index] )
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
__UpperCamelCase : Dict =[
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples['offset_mapping'][i] )
]
return tokenized_examples
A_ :List[str] = raw_datasets["""validation"""]
# Validation Feature Creation
A_ :List[Any] = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='''Running tokenizer on validation dataset''',
)
A_ :Optional[Any] = default_data_collator
A_ :Any = eval_dataset.remove_columns(['''example_id''', '''offset_mapping'''])
A_ :int = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def A ( a_ ,a_ ,a_ ,a_="eval" ) -> str:
__UpperCamelCase : Union[str, Any] =postprocess_qa_predictions(
examples=_A ,features=_A ,predictions=_A ,version_2_with_negative=args.version_2_with_negative ,n_best_size=args.n_best_size ,max_answer_length=args.max_answer_length ,null_score_diff_threshold=args.null_score_diff_threshold ,output_dir=args.output_dir ,prefix=_A ,)
# Format the result to the format the metric expects.
if args.version_2_with_negative:
__UpperCamelCase : List[Any] =[
{'id': k, 'prediction_text': v, 'no_answer_probability': 0.0} for k, v in predictions.items()
]
else:
__UpperCamelCase : List[Any] =[{'id': k, 'prediction_text': v} for k, v in predictions.items()]
__UpperCamelCase : Any =[{'id': ex['id'], 'answers': ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=_A ,label_ids=_A )
A_ :Union[str, Any] = load_metric('''squad_v2''' if args.version_2_with_negative else '''squad''')
# Evaluation!
logger.info('''Loading ONNX model %s for evaluation''', args.onnx_model_path)
with open(engine_name, '''rb''') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def A ( a_ ) -> int:
return trt.volume(engine.get_binding_shape(_A ) ) * engine.get_binding_dtype(_A ).itemsize
# Allocate device memory for inputs and outputs.
A_ :Union[str, Any] = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
A_ :str = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
A_ :Any = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
A_ :str = cuda.mem_alloc(h_outputa.nbytes)
A_ :List[Any] = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
A_ :Tuple = cuda.Stream()
# Evaluation
logger.info('''***** Running Evaluation *****''')
logger.info(f" Num examples = {len(eval_dataset)}")
logger.info(f" Batch size = {args.per_device_eval_batch_size}")
A_ :Optional[int] = 0.0
A_ :Tuple = 0
A_ :str = timeit.default_timer()
A_ :str = None
for step, batch in enumerate(eval_dataloader):
A_ :List[Any] = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
A_ :Any = outputs
A_ :Union[str, Any] = torch.tensor(start_logits)
A_ :List[Any] = torch.tensor(end_logits)
# necessary to pad predictions and labels for being gathered
A_ :Any = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
A_ :Any = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
A_ :Union[str, Any] = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
A_ :List[str] = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
A_ :int = nested_truncate(all_preds, len(eval_dataset))
A_ :int = timeit.default_timer() - start_time
logger.info(''' Evaluation done in total %f secs (%f sec per example)''', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('''Average Inference Time = {:.3f} ms'''.format(total_time * 1000 / niter))
logger.info('''Total Inference Time = {:.3f} ms'''.format(total_time * 1000))
logger.info('''Total Number of Inference = %d''', niter)
A_ :Tuple = post_processing_function(eval_examples, eval_dataset, all_preds)
A_ :Tuple = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
| 71 |
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class __SCREAMING_SNAKE_CASE( ctypes.Structure ):
# _fields is a specific attr expected by ctypes
_UpperCAmelCase = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]
def a_ ( ) -> Any:
"""simple docstring"""
if os.name == "nt":
snake_case__ = CursorInfo()
snake_case__ = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_A , ctypes.byref(_A ) )
snake_case__ = False
ctypes.windll.kernelaa.SetConsoleCursorInfo(_A , ctypes.byref(_A ) )
elif os.name == "posix":
sys.stdout.write('\033[?25l' )
sys.stdout.flush()
def a_ ( ) -> Tuple:
"""simple docstring"""
if os.name == "nt":
snake_case__ = CursorInfo()
snake_case__ = ctypes.windll.kernelaa.GetStdHandle(-11 )
ctypes.windll.kernelaa.GetConsoleCursorInfo(_A , ctypes.byref(_A ) )
snake_case__ = True
ctypes.windll.kernelaa.SetConsoleCursorInfo(_A , ctypes.byref(_A ) )
elif os.name == "posix":
sys.stdout.write('\033[?25h' )
sys.stdout.flush()
@contextmanager
def a_ ( ) -> str:
"""simple docstring"""
try:
hide_cursor()
yield
finally:
show_cursor()
| 307 | 0 |
'''simple docstring'''
from math import pi, sqrt, tan
def lowerCamelCase ( lowerCAmelCase : float ):
"""simple docstring"""
if side_length < 0:
raise ValueError('surface_area_cube() only accepts non-negative values' )
return 6 * side_length**2
def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if length < 0 or breadth < 0 or height < 0:
raise ValueError('surface_area_cuboid() only accepts non-negative values' )
return 2 * ((length * breadth) + (breadth * height) + (length * height))
def lowerCamelCase ( lowerCAmelCase : float ):
"""simple docstring"""
if radius < 0:
raise ValueError('surface_area_sphere() only accepts non-negative values' )
return 4 * pi * radius**2
def lowerCamelCase ( lowerCAmelCase : float ):
"""simple docstring"""
if radius < 0:
raise ValueError('surface_area_hemisphere() only accepts non-negative values' )
return 3 * pi * radius**2
def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('surface_area_cone() only accepts non-negative values' )
return pi * radius * (radius + (height**2 + radius**2) ** 0.5)
def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if radius_a < 0 or radius_a < 0 or height < 0:
raise ValueError(
'surface_area_conical_frustum() only accepts non-negative values' )
__magic_name__ : Dict = (height**2 + (radius_a - radius_a) ** 2) ** 0.5
return pi * ((slant_height * (radius_a + radius_a)) + radius_a**2 + radius_a**2)
def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if radius < 0 or height < 0:
raise ValueError('surface_area_cylinder() only accepts non-negative values' )
return 2 * pi * radius * (height + radius)
def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if torus_radius < 0 or tube_radius < 0:
raise ValueError('surface_area_torus() only accepts non-negative values' )
if torus_radius < tube_radius:
raise ValueError(
'surface_area_torus() does not support spindle or self intersecting tori' )
return 4 * pow(lowerCAmelCase , 2 ) * torus_radius * tube_radius
def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if length < 0 or width < 0:
raise ValueError('area_rectangle() only accepts non-negative values' )
return length * width
def lowerCamelCase ( lowerCAmelCase : float ):
"""simple docstring"""
if side_length < 0:
raise ValueError('area_square() only accepts non-negative values' )
return side_length**2
def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('area_triangle() only accepts non-negative values' )
return (base * height) / 2
def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if sidea < 0 or sidea < 0 or sidea < 0:
raise ValueError('area_triangle_three_sides() only accepts non-negative values' )
elif sidea + sidea < sidea or sidea + sidea < sidea or sidea + sidea < sidea:
raise ValueError('Given three sides do not form a triangle' )
__magic_name__ : str = (sidea + sidea + sidea) / 2
__magic_name__ : Optional[Any] = sqrt(
semi_perimeter
* (semi_perimeter - sidea)
* (semi_perimeter - sidea)
* (semi_perimeter - sidea) )
return area
def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if base < 0 or height < 0:
raise ValueError('area_parallelogram() only accepts non-negative values' )
return base * height
def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if basea < 0 or basea < 0 or height < 0:
raise ValueError('area_trapezium() only accepts non-negative values' )
return 1 / 2 * (basea + basea) * height
def lowerCamelCase ( lowerCAmelCase : float ):
"""simple docstring"""
if radius < 0:
raise ValueError('area_circle() only accepts non-negative values' )
return pi * radius**2
def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if radius_x < 0 or radius_y < 0:
raise ValueError('area_ellipse() only accepts non-negative values' )
return pi * radius_x * radius_y
def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if diagonal_a < 0 or diagonal_a < 0:
raise ValueError('area_rhombus() only accepts non-negative values' )
return 1 / 2 * diagonal_a * diagonal_a
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : float ):
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase ) or sides < 3:
raise ValueError(
'area_reg_polygon() only accepts integers greater than or \
equal to three as number of sides' )
elif length < 0:
raise ValueError(
'area_reg_polygon() only accepts non-negative values as \
length of a side' )
return (sides * length**2) / (4 * tan(pi / sides ))
return (sides * length**2) / (4 * tan(pi / sides ))
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) # verbose so we can see methods missing tests
print('''[DEMO] Areas of various geometric shapes: \n''')
print(F'Rectangle: {area_rectangle(1_0, 2_0) = }')
print(F'Square: {area_square(1_0) = }')
print(F'Triangle: {area_triangle(1_0, 1_0) = }')
print(F'Triangle: {area_triangle_three_sides(5, 1_2, 1_3) = }')
print(F'Parallelogram: {area_parallelogram(1_0, 2_0) = }')
print(F'Rhombus: {area_rhombus(1_0, 2_0) = }')
print(F'Trapezium: {area_trapezium(1_0, 2_0, 3_0) = }')
print(F'Circle: {area_circle(2_0) = }')
print(F'Ellipse: {area_ellipse(1_0, 2_0) = }')
print('''\nSurface Areas of various geometric shapes: \n''')
print(F'Cube: {surface_area_cube(2_0) = }')
print(F'Cuboid: {surface_area_cuboid(1_0, 2_0, 3_0) = }')
print(F'Sphere: {surface_area_sphere(2_0) = }')
print(F'Hemisphere: {surface_area_hemisphere(2_0) = }')
print(F'Cone: {surface_area_cone(1_0, 2_0) = }')
print(F'Conical Frustum: {surface_area_conical_frustum(1_0, 2_0, 3_0) = }')
print(F'Cylinder: {surface_area_cylinder(1_0, 2_0) = }')
print(F'Torus: {surface_area_torus(2_0, 1_0) = }')
print(F'Equilateral Triangle: {area_reg_polygon(3, 1_0) = }')
print(F'Square: {area_reg_polygon(4, 1_0) = }')
print(F'Reqular Pentagon: {area_reg_polygon(5, 1_0) = }') | 275 |
'''simple docstring'''
def lowerCamelCase ( lowerCAmelCase : str ):
"""simple docstring"""
if not all(x.isalpha() for x in string ):
raise ValueError('String must only contain alphabetic characters.' )
__magic_name__ : int = sorted(string.lower() )
return len(lowerCAmelCase ) == len(set(lowerCAmelCase ) )
if __name__ == "__main__":
lowerCAmelCase :Any = input('''Enter a string ''').strip()
lowerCAmelCase :List[Any] = is_isogram(input_str)
print(F'{input_str} is {"an" if isogram else "not an"} isogram.') | 275 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
@property
def _A ( self : List[str] ):
torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def _A ( self : int ):
_UpperCAmelCase : Optional[Any] = self.dummy_uncond_unet
_UpperCAmelCase : Any = ScoreSdeVeScheduler()
_UpperCAmelCase : str = ScoreSdeVePipeline(unet=A , scheduler=A )
sde_ve.to(A )
sde_ve.set_progress_bar_config(disable=A )
_UpperCAmelCase : List[str] = torch.manual_seed(0 )
_UpperCAmelCase : List[str] = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=A ).images
_UpperCAmelCase : Optional[int] = torch.manual_seed(0 )
_UpperCAmelCase : List[Any] = sde_ve(num_inference_steps=2 , output_type="numpy" , generator=A , return_dict=A )[
0
]
_UpperCAmelCase : Tuple = image[0, -3:, -3:, -1]
_UpperCAmelCase : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
_UpperCAmelCase : List[Any] = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
def _A ( self : int ):
_UpperCAmelCase : int = "google/ncsnpp-church-256"
_UpperCAmelCase : Any = UNetaDModel.from_pretrained(A )
_UpperCAmelCase : List[Any] = ScoreSdeVeScheduler.from_pretrained(A )
_UpperCAmelCase : Union[str, Any] = ScoreSdeVePipeline(unet=A , scheduler=A )
sde_ve.to(A )
sde_ve.set_progress_bar_config(disable=A )
_UpperCAmelCase : int = torch.manual_seed(0 )
_UpperCAmelCase : Tuple = sde_ve(num_inference_steps=10 , output_type="numpy" , generator=A ).images
_UpperCAmelCase : int = image[0, -3:, -3:, -1]
assert image.shape == (1, 256, 256, 3)
_UpperCAmelCase : int = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 31 |
import string
# frequency taken from https://en.wikipedia.org/wiki/Letter_frequency
__lowerCAmelCase : Optional[int] = {
'E': 12.70,
'T': 9.06,
'A': 8.17,
'O': 7.51,
'I': 6.97,
'N': 6.75,
'S': 6.33,
'H': 6.09,
'R': 5.99,
'D': 4.25,
'L': 4.03,
'C': 2.78,
'U': 2.76,
'M': 2.41,
'W': 2.36,
'F': 2.23,
'G': 2.02,
'Y': 1.97,
'P': 1.93,
'B': 1.29,
'V': 0.98,
'K': 0.77,
'J': 0.15,
'X': 0.15,
'Q': 0.10,
'Z': 0.07,
}
__lowerCAmelCase : Optional[Any] = 'ETAOINSHRDLCUMWFGYPBVKJXQZ'
__lowerCAmelCase : Optional[Any] = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = {letter: 0 for letter in string.ascii_uppercase}
for letter in message.upper():
if letter in LETTERS:
letter_count[letter] += 1
return letter_count
def a__ ( A_ ):
'''simple docstring'''
return x[0]
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = get_letter_count(A_ )
__magic_name__ = {
freq: [] for letter, freq in letter_to_freq.items()
}
for letter in LETTERS:
freq_to_letter[letter_to_freq[letter]].append(A_ )
__magic_name__ = {}
for freq in freq_to_letter:
freq_to_letter[freq].sort(key=ETAOIN.find, reverse=A_ )
__magic_name__ = """""".join(freq_to_letter[freq] )
__magic_name__ = list(freq_to_letter_str.items() )
freq_pairs.sort(key=A_, reverse=A_ )
__magic_name__ = [freq_pair[1] for freq_pair in freq_pairs]
return "".join(A_ )
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = get_frequency_order(A_ )
__magic_name__ = 0
for common_letter in ETAOIN[:6]:
if common_letter in freq_order[:6]:
match_score += 1
for uncommon_letter in ETAOIN[-6:]:
if uncommon_letter in freq_order[-6:]:
match_score += 1
return match_score
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 | 0 |
'''simple docstring'''
import math
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> float:
if (
not isinstance(_lowerCAmelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * power_factor
def __snake_case( _lowerCAmelCase , _lowerCAmelCase ) -> float:
if (
not isinstance(_lowerCAmelCase , (int, float) )
or power_factor < -1
or power_factor > 1
):
raise ValueError("""power_factor must be a valid float value between -1 and 1.""" )
return apparent_power * math.sqrt(1 - power_factor**2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 359 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class UpperCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , snake_case_ : str , snake_case_ : Dict=7 , snake_case_ : str=3 , snake_case_ : List[str]=18 , snake_case_ : Tuple=30 , snake_case_ : int=400 , snake_case_ : Any=True , snake_case_ : List[str]=None , snake_case_ : List[str]=True , snake_case_ : Union[str, Any]=None , snake_case_ : Dict=True , ):
snake_case__ : List[str] = size if size is not None else {"""shortest_edge""": 20}
snake_case__ : Union[str, Any] = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
snake_case__ : Tuple = parent
snake_case__ : Tuple = batch_size
snake_case__ : List[str] = num_channels
snake_case__ : Any = image_size
snake_case__ : str = min_resolution
snake_case__ : Dict = max_resolution
snake_case__ : Optional[int] = do_resize
snake_case__ : int = size
snake_case__ : List[Any] = do_center_crop
snake_case__ : int = crop_size
snake_case__ : Dict = do_flip_channel_order
def lowerCamelCase ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class UpperCAmelCase_ ( _a , unittest.TestCase ):
"""simple docstring"""
lowercase = MobileViTImageProcessor if is_vision_available() else None
def lowerCamelCase ( self : List[str] ):
snake_case__ : List[str] = MobileViTImageProcessingTester(self )
@property
def lowerCamelCase ( self : Optional[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase ( self : Optional[Any] ):
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case_ , """do_resize""" ) )
self.assertTrue(hasattr(snake_case_ , """size""" ) )
self.assertTrue(hasattr(snake_case_ , """do_center_crop""" ) )
self.assertTrue(hasattr(snake_case_ , """center_crop""" ) )
self.assertTrue(hasattr(snake_case_ , """do_flip_channel_order""" ) )
def lowerCamelCase ( self : List[str] ):
snake_case__ : Any = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 20} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
snake_case__ : Dict = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def lowerCamelCase ( self : str ):
pass
def lowerCamelCase ( self : int ):
# Initialize image_processing
snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , Image.Image )
# Test not batched input
snake_case__ : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case__ : List[str] = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase ( self : int ):
# Initialize image_processing
snake_case__ : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , numpify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , np.ndarray )
# Test not batched input
snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case__ : str = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def lowerCamelCase ( self : List[Any] ):
# Initialize image_processing
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case_ , torchify=snake_case_ )
for image in image_inputs:
self.assertIsInstance(snake_case_ , torch.Tensor )
# Test not batched input
snake_case__ : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
snake_case__ : Optional[Any] = image_processing(snake_case_ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
| 43 | 0 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__A = logging.get_logger(__name__)
__A = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all LED models at https://huggingface.co/models?filter=LED
__A = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
__A = {
'allenai/led-base-16384': 1_63_84,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def lowerCamelCase_ ( ) -> Any:
"""simple docstring"""
__lowerCamelCase = (
list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
)
__lowerCamelCase = bs[:]
__lowerCamelCase = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCAmelCase_ )
cs.append(2**8 + n )
n += 1
__lowerCamelCase = [chr(UpperCAmelCase_ ) for n in cs]
return dict(zip(UpperCAmelCase_ , UpperCAmelCase_ ) )
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] ) -> Tuple:
"""simple docstring"""
__lowerCamelCase = set()
__lowerCamelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowerCamelCase = char
return pairs
class __lowerCAmelCase ( _lowerCamelCase ):
"""simple docstring"""
snake_case_ = VOCAB_FILES_NAMES
snake_case_ = PRETRAINED_VOCAB_FILES_MAP
snake_case_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
snake_case_ = ['''input_ids''', '''attention_mask''']
def __init__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__="replace" , lowerCamelCase__="<s>" , lowerCamelCase__="</s>" , lowerCamelCase__="</s>" , lowerCamelCase__="<s>" , lowerCamelCase__="<unk>" , lowerCamelCase__="<pad>" , lowerCamelCase__="<mask>" , lowerCamelCase__=False , **lowerCamelCase__ , ) -> List[str]:
'''simple docstring'''
__lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else bos_token
__lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else eos_token
__lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else sep_token
__lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else cls_token
__lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else unk_token
__lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__lowerCamelCase = AddedToken(lowerCamelCase__ , lstrip=lowerCamelCase__ , rstrip=lowerCamelCase__ ) if isinstance(lowerCamelCase__ , lowerCamelCase__ ) else mask_token
super().__init__(
errors=lowerCamelCase__ , bos_token=lowerCamelCase__ , eos_token=lowerCamelCase__ , unk_token=lowerCamelCase__ , sep_token=lowerCamelCase__ , cls_token=lowerCamelCase__ , pad_token=lowerCamelCase__ , mask_token=lowerCamelCase__ , add_prefix_space=lowerCamelCase__ , **lowerCamelCase__ , )
with open(lowerCamelCase__ , encoding='utf-8' ) as vocab_handle:
__lowerCamelCase = json.load(lowerCamelCase__ )
__lowerCamelCase = {v: k for k, v in self.encoder.items()}
__lowerCamelCase = errors # how to handle errors in decoding
__lowerCamelCase = bytes_to_unicode()
__lowerCamelCase = {v: k for k, v in self.byte_encoder.items()}
with open(lowerCamelCase__ , encoding='utf-8' ) as merges_handle:
__lowerCamelCase = merges_handle.read().split('\n' )[1:-1]
__lowerCamelCase = [tuple(merge.split() ) for merge in bpe_merges]
__lowerCamelCase = dict(zip(lowerCamelCase__ , range(len(lowerCamelCase__ ) ) ) )
__lowerCamelCase = {}
__lowerCamelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__lowerCamelCase = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def lowercase_ ( self ) -> Tuple:
'''simple docstring'''
return len(self.encoder )
def lowercase_ ( self ) -> List[Any]:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def lowercase_ ( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
__lowerCamelCase = tuple(lowerCamelCase__ )
__lowerCamelCase = get_pairs(lowerCamelCase__ )
if not pairs:
return token
while True:
__lowerCamelCase = min(lowerCamelCase__ , key=lambda lowerCamelCase__ : self.bpe_ranks.get(lowerCamelCase__ , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCamelCase , __lowerCamelCase = bigram
__lowerCamelCase = []
__lowerCamelCase = 0
while i < len(lowerCamelCase__ ):
try:
__lowerCamelCase = word.index(lowerCamelCase__ , lowerCamelCase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__lowerCamelCase = j
if word[i] == first and i < len(lowerCamelCase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCamelCase = tuple(lowerCamelCase__ )
__lowerCamelCase = new_word
if len(lowerCamelCase__ ) == 1:
break
else:
__lowerCamelCase = get_pairs(lowerCamelCase__ )
__lowerCamelCase = ' '.join(lowerCamelCase__ )
__lowerCamelCase = word
return word
def lowercase_ ( self , lowerCamelCase__ ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = []
for token in re.findall(self.pat , lowerCamelCase__ ):
__lowerCamelCase = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(lowerCamelCase__ ).split(' ' ) )
return bpe_tokens
def lowercase_ ( self , lowerCamelCase__ ) -> List[Any]:
'''simple docstring'''
return self.encoder.get(lowerCamelCase__ , self.encoder.get(self.unk_token ) )
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.decoder.get(lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = ''.join(lowerCamelCase__ )
__lowerCamelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Dict:
'''simple docstring'''
if not os.path.isdir(lowerCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
__lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__lowerCamelCase = os.path.join(
lowerCamelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowerCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=lowerCamelCase__ , ensure_ascii=lowerCamelCase__ ) + '\n' )
__lowerCamelCase = 0
with open(lowerCamelCase__ , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda lowerCamelCase__ : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
' Please check that the tokenizer is not corrupted!' )
__lowerCamelCase = token_index
writer.write(' '.join(lowerCamelCase__ ) + '\n' )
index += 1
return vocab_file, merge_file
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> List[Any]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
__lowerCamelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = False ) -> int:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCamelCase__ , token_ids_a=lowerCamelCase__ , already_has_special_tokens=lowerCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(lowerCamelCase__ )) + [1]
return [1] + ([0] * len(lowerCamelCase__ )) + [1, 1] + ([0] * len(lowerCamelCase__ )) + [1]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None ) -> Tuple:
'''simple docstring'''
__lowerCamelCase = [self.sep_token_id]
__lowerCamelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__=False , **lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(lowerCamelCase__ ) > 0 and not text[0].isspace()):
__lowerCamelCase = ' ' + text
return (text, kwargs)
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ = None , lowerCamelCase__ = PaddingStrategy.DO_NOT_PAD , lowerCamelCase__ = None , lowerCamelCase__ = None , ) -> Optional[int]:
'''simple docstring'''
__lowerCamelCase = super()._pad(
encoded_inputs=lowerCamelCase__ , max_length=lowerCamelCase__ , padding_strategy=lowerCamelCase__ , pad_to_multiple_of=lowerCamelCase__ , return_attention_mask=lowerCamelCase__ , )
# Load from model defaults
if return_attention_mask is None:
__lowerCamelCase = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__lowerCamelCase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__lowerCamelCase = len(encoded_inputs['global_attention_mask'] ) != len(lowerCamelCase__ )
if needs_to_be_padded:
__lowerCamelCase = len(lowerCamelCase__ ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__lowerCamelCase = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
__lowerCamelCase = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
| 90 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase = ViTImageProcessor if is_vision_available() else None
@property
def UpperCamelCase ( self: str ):
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = (3, 32, 1_28)
A__ = tempfile.mkdtemp()
# fmt: off
A__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
A__ = dict(zip(UpperCamelCase , range(len(UpperCamelCase ) ) ) )
A__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(UpperCamelCase ) + """\n""" )
A__ = {
"""do_normalize""": False,
"""do_resize""": True,
"""image_processor_type""": """ViTImageProcessor""",
"""resample""": 3,
"""size""": {"""height""": 32, """width""": 1_28},
}
A__ = os.path.join(self.tmpdirname , UpperCamelCase )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[str] , **UpperCamelCase: Union[str, Any] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: List[Any] , **UpperCamelCase: str ):
"""simple docstring"""
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )
A__ = Image.fromarray(np.moveaxis(UpperCamelCase , 0 , -1 ) )
return image_input
def UpperCamelCase ( self: Tuple ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: Dict ):
"""simple docstring"""
A__ = self.get_tokenizer()
A__ = self.get_image_processor()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
A__ = self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ = self.get_image_processor(do_normalize=UpperCamelCase , padding_value=1.0 )
A__ = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=UpperCamelCase , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = self.prepare_image_inputs()
A__ = image_processor(UpperCamelCase , return_tensors="""np""" )
A__ = processor(images=UpperCamelCase , return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase ( self: Union[str, Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = processor(text=UpperCamelCase )
A__ = tokenizer(UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = """test"""
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """labels"""] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase ):
processor()
def UpperCamelCase ( self: List[str] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
A__ = processor.char_decode(UpperCamelCase )
A__ = tokenizer.batch_decode(UpperCamelCase )
A__ = [seq.replace(""" """ , """""" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase , UpperCamelCase )
def UpperCamelCase ( self: List[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = None
A__ = self.prepare_image_inputs()
A__ = processor(text=UpperCamelCase , images=UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def UpperCamelCase ( self: Optional[Any] ):
"""simple docstring"""
A__ = self.get_image_processor()
A__ = self.get_tokenizer()
A__ = MgpstrProcessor(tokenizer=UpperCamelCase , image_processor=UpperCamelCase )
A__ = torch.randn(1 , 27 , 38 )
A__ = torch.randn(1 , 27 , 5_02_57 )
A__ = torch.randn(1 , 27 , 3_05_22 )
A__ = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["""generated_text""", """scores""", """char_preds""", """bpe_preds""", """wp_preds"""] )
| 335 | 0 |
from __future__ import annotations
import unittest
from transformers import LEDConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFLEDForConditionalGeneration, TFLEDModel
@require_tf
class _snake_case :
SCREAMING_SNAKE_CASE__ = LEDConfig
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = 'gelu'
def __init__( self , _lowerCamelCase , _lowerCamelCase=13 , _lowerCamelCase=7 , _lowerCamelCase=True , _lowerCamelCase=False , _lowerCamelCase=99 , _lowerCamelCase=32 , _lowerCamelCase=2 , _lowerCamelCase=4 , _lowerCamelCase=37 , _lowerCamelCase=0.1 , _lowerCamelCase=0.1 , _lowerCamelCase=20 , _lowerCamelCase=2 , _lowerCamelCase=1 , _lowerCamelCase=0 , _lowerCamelCase=4 , ):
a :Dict = parent
a :Tuple = batch_size
a :List[Any] = seq_length
a :Any = is_training
a :Tuple = use_labels
a :Any = vocab_size
a :List[str] = hidden_size
a :Dict = num_hidden_layers
a :Optional[int] = num_attention_heads
a :Dict = intermediate_size
a :Dict = hidden_dropout_prob
a :str = attention_probs_dropout_prob
a :Optional[Any] = max_position_embeddings
a :str = eos_token_id
a :Union[str, Any] = pad_token_id
a :str = bos_token_id
a :List[Any] = attention_window
# `ModelTesterMixin.test_attention_outputs` is expecting attention tensors to be of size
# [num_attention_heads, encoder_seq_length, encoder_key_length], but TFLongformerSelfAttention
# returns attention of shape [num_attention_heads, encoder_seq_length, self.attention_window + 1]
# because its local attention only attends to `self.attention_window` and one before and one after
a :str = self.attention_window + 2
# because of padding `encoder_seq_length`, is different from `seq_length`. Relevant for
# the `test_attention_outputs` and `test_hidden_states_output` tests
a :Optional[int] = (
self.seq_length + (self.attention_window - self.seq_length % self.attention_window) % self.attention_window
)
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
a :Any = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
a :Union[str, Any] = tf.concat([input_ids, eos_tensor] , axis=1 )
a :int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
a :str = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , attention_window=self.attention_window , **self.config_updates , )
a :Any = prepare_led_inputs_dict(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
a :Tuple = tf.concat(
[tf.zeros_like(_lowerCamelCase )[:, :-1], tf.ones_like(_lowerCamelCase )[:, -1:]] , axis=-1 , )
a :int = global_attention_mask
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase , _lowerCamelCase ):
a :str = TFLEDModel(config=_lowerCamelCase ).get_decoder()
a :Optional[Any] = inputs_dict['''input_ids''']
a :str = input_ids[:1, :]
a :str = inputs_dict['''attention_mask'''][:1, :]
a :int = 1
# first forward pass
a :Dict = model(_lowerCamelCase , attention_mask=_lowerCamelCase , use_cache=_lowerCamelCase )
a , a :str = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
a :Tuple = ids_tensor((self.batch_size, 3) , config.vocab_size )
a :Any = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
a :Optional[Any] = tf.concat([input_ids, next_tokens] , axis=-1 )
a :Optional[int] = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
a :Any = model(_lowerCamelCase , attention_mask=_lowerCamelCase )[0]
a :Union[str, Any] = model(_lowerCamelCase , attention_mask=_lowerCamelCase , past_key_values=_lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
a :Union[str, Any] = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
a :str = output_from_no_past[:, -3:, random_slice_idx]
a :Optional[Any] = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowerCamelCase , _lowerCamelCase , rtol=1e-3 )
def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : str , UpperCAmelCase_ : str , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Any=None , UpperCAmelCase_ : int=None , ):
"""simple docstring"""
if attention_mask is None:
a :int = tf.cast(tf.math.not_equal(UpperCAmelCase_ , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
a :Optional[int] = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
a :Optional[int] = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
a :Tuple = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"attention_mask": attention_mask,
"decoder_input_ids": decoder_input_ids,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
}
@require_tf
class _snake_case ( _snake_case , _snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (TFLEDForConditionalGeneration, TFLEDModel) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ = (TFLEDForConditionalGeneration,) if is_tf_available() else ()
SCREAMING_SNAKE_CASE__ = (
{
'conversational': TFLEDForConditionalGeneration,
'feature-extraction': TFLEDModel,
'summarization': TFLEDForConditionalGeneration,
'text2text-generation': TFLEDForConditionalGeneration,
'translation': TFLEDForConditionalGeneration,
}
if is_tf_available()
else {}
)
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[int] = TFLEDModelTester(self )
a :Any = ConfigTester(self , config_class=_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a , a :List[str] = self.model_tester.prepare_config_and_inputs_for_common()
a :List[str] = tf.zeros_like(inputs_dict['''attention_mask'''] )
a :int = 2
a :int = tf.where(
tf.range(self.model_tester.seq_length )[None, :] < num_global_attn_indices , 1 , inputs_dict['''global_attention_mask'''] , )
a :List[str] = True
a :int = self.model_tester.seq_length
a :List[str] = self.model_tester.encoder_seq_length
def check_decoder_attentions_output(_lowerCamelCase ):
a :List[Any] = outputs.decoder_attentions
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
def check_encoder_attentions_output(_lowerCamelCase ):
a :List[str] = [t.numpy() for t in outputs.encoder_attentions]
a :Dict = [t.numpy() for t in outputs.encoder_global_attentions]
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertEqual(len(_lowerCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_length, seq_length] , )
self.assertListEqual(
list(global_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, num_global_attn_indices] , )
for model_class in self.all_model_classes:
a :Union[str, Any] = True
a :Optional[Any] = False
a :Dict = False
a :Optional[int] = model_class(_lowerCamelCase )
a :Optional[Any] = model(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
a :List[str] = len(_lowerCamelCase )
self.assertEqual(config.output_hidden_states , _lowerCamelCase )
check_encoder_attentions_output(_lowerCamelCase )
if self.is_encoder_decoder:
a :List[Any] = model_class(_lowerCamelCase )
a :Optional[Any] = model(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , _lowerCamelCase )
check_decoder_attentions_output(_lowerCamelCase )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
a :str = True
a :Dict = model_class(_lowerCamelCase )
a :Optional[int] = model(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
self.assertEqual(config.output_hidden_states , _lowerCamelCase )
check_encoder_attentions_output(_lowerCamelCase )
# Check attention is always last and order is fine
a :Optional[Any] = True
a :Optional[Any] = True
a :Optional[int] = model_class(_lowerCamelCase )
a :Optional[Any] = model(self._prepare_for_class(_lowerCamelCase , _lowerCamelCase ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_lowerCamelCase ) )
self.assertEqual(model.config.output_hidden_states , _lowerCamelCase )
check_encoder_attentions_output(_lowerCamelCase )
@unittest.skip('''LED keeps using potentially symbolic tensors in conditionals and breaks tracing.''' )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
# TODO: Head-masking not yet implement
pass
def __lowerCamelCase ( UpperCAmelCase_ : List[str] ):
"""simple docstring"""
return tf.constant(UpperCAmelCase_ , dtype=tf.intaa )
snake_case : Optional[Any] = 1E-4
@slow
@require_tf
class _snake_case ( unittest.TestCase ):
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' ).led
# change to intended input here
a :Tuple = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
a :int = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
a :Optional[int] = prepare_led_inputs_dict(model.config , _lowerCamelCase , _lowerCamelCase )
a :int = model(**_lowerCamelCase )[0]
a :Optional[int] = (1, 1024, 768)
self.assertEqual(output.shape , _lowerCamelCase )
# change to expected output here
a :List[Any] = tf.convert_to_tensor(
[[2.3050, 2.8279, 0.6531], [-1.8457, -0.1455, -3.5661], [-1.0186, 0.4586, -2.2043]] , )
tf.debugging.assert_near(output[:, :3, :3] , _lowerCamelCase , atol=1e-3 )
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = TFLEDForConditionalGeneration.from_pretrained('''allenai/led-base-16384''' )
# change to intended input here
a :str = _long_tensor([512 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
a :Tuple = _long_tensor([128 * [0, 3_1414, 232, 328, 740, 1140, 1_2695, 69]] )
a :Any = prepare_led_inputs_dict(model.config , _lowerCamelCase , _lowerCamelCase )
a :List[str] = model(**_lowerCamelCase )[0]
a :Dict = (1, 1024, model.config.vocab_size)
self.assertEqual(output.shape , _lowerCamelCase )
# change to expected output here
a :str = tf.convert_to_tensor(
[[33.6507, 6.4572, 16.8089], [5.8739, -2.4238, 11.2902], [-3.2139, -4.3149, 4.2783]] , )
tf.debugging.assert_near(output[:, :3, :3] , _lowerCamelCase , atol=1e-3 , rtol=1e-3 )
| 281 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
snake_case : Union[str, Any] = logging.get_logger(__name__)
snake_case : List[str] = {
'''google/efficientnet-b7''': '''https://huggingface.co/google/efficientnet-b7/resolve/main/config.json''',
}
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = 'efficientnet'
def __init__( self , _lowerCamelCase = 3 , _lowerCamelCase = 600 , _lowerCamelCase = 2.0 , _lowerCamelCase = 3.1 , _lowerCamelCase = 8 , _lowerCamelCase = [3, 3, 5, 3, 5, 5, 3] , _lowerCamelCase = [32, 16, 24, 40, 80, 112, 192] , _lowerCamelCase = [16, 24, 40, 80, 112, 192, 320] , _lowerCamelCase = [] , _lowerCamelCase = [1, 2, 2, 2, 1, 2, 1] , _lowerCamelCase = [1, 2, 2, 3, 3, 4, 1] , _lowerCamelCase = [1, 6, 6, 6, 6, 6, 6] , _lowerCamelCase = 0.25 , _lowerCamelCase = "swish" , _lowerCamelCase = 2560 , _lowerCamelCase = "mean" , _lowerCamelCase = 0.02 , _lowerCamelCase = 0.001 , _lowerCamelCase = 0.99 , _lowerCamelCase = 0.5 , _lowerCamelCase = 0.2 , **_lowerCamelCase , ):
super().__init__(**_lowerCamelCase )
a :Optional[int] = num_channels
a :List[str] = image_size
a :int = width_coefficient
a :Optional[Any] = depth_coefficient
a :Any = depth_divisor
a :Any = kernel_sizes
a :Tuple = in_channels
a :Union[str, Any] = out_channels
a :Any = depthwise_padding
a :Any = strides
a :Optional[Any] = num_block_repeats
a :Tuple = expand_ratios
a :Dict = squeeze_expansion_ratio
a :int = hidden_act
a :Dict = hidden_dim
a :Tuple = pooling_type
a :Any = initializer_range
a :Tuple = batch_norm_eps
a :Optional[int] = batch_norm_momentum
a :List[Any] = dropout_rate
a :Optional[int] = drop_connect_rate
a :Tuple = sum(_lowerCamelCase ) * 4
class _snake_case ( _snake_case ):
SCREAMING_SNAKE_CASE__ = version.parse('1.11' )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def SCREAMING_SNAKE_CASE__ ( self ):
return 1e-5
| 281 | 1 |
class lowerCAmelCase :
def __init__( self : str ) -> Optional[Any]:
lowerCamelCase__ : Tuple = {}
def A_ ( self : List[Any] ) -> None:
print(self.vertex )
for i in self.vertex:
print(UpperCAmelCase , ' -> ' , ' -> '.join([str(UpperCAmelCase ) for j in self.vertex[i]] ) )
def A_ ( self : List[Any] , UpperCAmelCase : int , UpperCAmelCase : int ) -> None:
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(UpperCAmelCase )
else:
# else make a new vertex
lowerCamelCase__ : Union[str, Any] = [to_vertex]
def A_ ( self : Optional[int] ) -> None:
# visited array for storing already visited nodes
lowerCamelCase__ : List[Any] = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(UpperCAmelCase , UpperCAmelCase )
def A_ ( self : Dict , UpperCAmelCase : int , UpperCAmelCase : list ) -> None:
# mark start vertex as visited
lowerCamelCase__ : int = True
print(UpperCAmelCase , end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(UpperCAmelCase , UpperCAmelCase )
if __name__ == "__main__":
_UpperCAmelCase : int = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 50 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class lowerCAmelCase ( __UpperCamelCase ):
UpperCAmelCase__ = 42
class lowerCAmelCase ( __UpperCamelCase, __UpperCamelCase ):
@register_to_config
def __init__( self : List[str] , UpperCAmelCase : int = 65536 , UpperCAmelCase : Optional[int] = None , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 2 , UpperCAmelCase : int = 0 , UpperCAmelCase : str = "fourier" , UpperCAmelCase : bool = True , UpperCAmelCase : bool = False , UpperCAmelCase : float = 0.0 , UpperCAmelCase : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") , UpperCAmelCase : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") , UpperCAmelCase : Tuple[str] = "UNetMidBlock1D" , UpperCAmelCase : str = None , UpperCAmelCase : Tuple[int] = (32, 32, 64) , UpperCAmelCase : str = None , UpperCAmelCase : int = 8 , UpperCAmelCase : int = 1 , UpperCAmelCase : bool = False , ) -> List[Any]:
super().__init__()
lowerCamelCase__ : Optional[int] = sample_size
# time
if time_embedding_type == "fourier":
lowerCamelCase__ : Optional[Any] = GaussianFourierProjection(
embedding_size=8 , set_W_to_weight=UpperCAmelCase , log=UpperCAmelCase , flip_sin_to_cos=UpperCAmelCase )
lowerCamelCase__ : Any = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
lowerCamelCase__ : List[Any] = Timesteps(
block_out_channels[0] , flip_sin_to_cos=UpperCAmelCase , downscale_freq_shift=UpperCAmelCase )
lowerCamelCase__ : Dict = block_out_channels[0]
if use_timestep_embedding:
lowerCamelCase__ : str = block_out_channels[0] * 4
lowerCamelCase__ : List[Any] = TimestepEmbedding(
in_channels=UpperCAmelCase , time_embed_dim=UpperCAmelCase , act_fn=UpperCAmelCase , out_dim=block_out_channels[0] , )
lowerCamelCase__ : Any = nn.ModuleList([] )
lowerCamelCase__ : Tuple = None
lowerCamelCase__ : List[str] = nn.ModuleList([] )
lowerCamelCase__ : Optional[int] = None
# down
lowerCamelCase__ : Optional[int] = in_channels
for i, down_block_type in enumerate(UpperCAmelCase ):
lowerCamelCase__ : Union[str, Any] = output_channel
lowerCamelCase__ : Tuple = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
lowerCamelCase__ : Union[str, Any] = i == len(UpperCAmelCase ) - 1
lowerCamelCase__ : Optional[int] = get_down_block(
UpperCAmelCase , num_layers=UpperCAmelCase , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , temb_channels=block_out_channels[0] , add_downsample=not is_final_block or downsample_each_block , )
self.down_blocks.append(UpperCAmelCase )
# mid
lowerCamelCase__ : Optional[int] = get_mid_block(
UpperCAmelCase , in_channels=block_out_channels[-1] , mid_channels=block_out_channels[-1] , out_channels=block_out_channels[-1] , embed_dim=block_out_channels[0] , num_layers=UpperCAmelCase , add_downsample=UpperCAmelCase , )
# up
lowerCamelCase__ : Optional[int] = list(reversed(UpperCAmelCase ) )
lowerCamelCase__ : Optional[int] = reversed_block_out_channels[0]
if out_block_type is None:
lowerCamelCase__ : List[str] = out_channels
else:
lowerCamelCase__ : Any = block_out_channels[0]
for i, up_block_type in enumerate(UpperCAmelCase ):
lowerCamelCase__ : Tuple = output_channel
lowerCamelCase__ : Union[str, Any] = (
reversed_block_out_channels[i + 1] if i < len(UpperCAmelCase ) - 1 else final_upsample_channels
)
lowerCamelCase__ : List[str] = i == len(UpperCAmelCase ) - 1
lowerCamelCase__ : Dict = get_up_block(
UpperCAmelCase , num_layers=UpperCAmelCase , in_channels=UpperCAmelCase , out_channels=UpperCAmelCase , temb_channels=block_out_channels[0] , add_upsample=not is_final_block , )
self.up_blocks.append(UpperCAmelCase )
lowerCamelCase__ : int = output_channel
# out
lowerCamelCase__ : int = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 , 32 )
lowerCamelCase__ : List[Any] = get_out_block(
out_block_type=UpperCAmelCase , num_groups_out=UpperCAmelCase , embed_dim=block_out_channels[0] , out_channels=UpperCAmelCase , act_fn=UpperCAmelCase , fc_dim=block_out_channels[-1] // 4 , )
def A_ ( self : List[Any] , UpperCAmelCase : torch.FloatTensor , UpperCAmelCase : Union[torch.Tensor, float, int] , UpperCAmelCase : bool = True , ) -> Union[UNetaDOutput, Tuple]:
lowerCamelCase__ : Optional[Any] = timestep
if not torch.is_tensor(UpperCAmelCase ):
lowerCamelCase__ : Optional[int] = torch.tensor([timesteps] , dtype=torch.long , device=sample.device )
elif torch.is_tensor(UpperCAmelCase ) and len(timesteps.shape ) == 0:
lowerCamelCase__ : List[str] = timesteps[None].to(sample.device )
lowerCamelCase__ : Optional[int] = self.time_proj(UpperCAmelCase )
if self.config.use_timestep_embedding:
lowerCamelCase__ : str = self.time_mlp(UpperCAmelCase )
else:
lowerCamelCase__ : List[str] = timestep_embed[..., None]
lowerCamelCase__ : str = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
lowerCamelCase__ : str = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
lowerCamelCase__ : str = ()
for downsample_block in self.down_blocks:
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = downsample_block(hidden_states=UpperCAmelCase , temb=UpperCAmelCase )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
lowerCamelCase__ : Optional[Any] = self.mid_block(UpperCAmelCase , UpperCAmelCase )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
lowerCamelCase__ : Dict = down_block_res_samples[-1:]
lowerCamelCase__ : Optional[Any] = down_block_res_samples[:-1]
lowerCamelCase__ : Any = upsample_block(UpperCAmelCase , res_hidden_states_tuple=UpperCAmelCase , temb=UpperCAmelCase )
# 5. post-process
if self.out_block:
lowerCamelCase__ : Any = self.out_block(UpperCAmelCase , UpperCAmelCase )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=UpperCAmelCase )
| 50 | 1 |
"""simple docstring"""
def UpperCamelCase__ ( SCREAMING_SNAKE_CASE__ ):
if not numbers:
return 0
if not isinstance(SCREAMING_SNAKE_CASE__ , (list, tuple) ) or not all(
isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) for number in numbers ):
raise ValueError('numbers must be an iterable of integers' )
__lowerCamelCase : Optional[int] = numbers[0]
for i in range(1 , len(SCREAMING_SNAKE_CASE__ ) ):
# update the maximum and minimum subarray products
__lowerCamelCase : Optional[Any] = numbers[i]
if number < 0:
__lowerCamelCase : Union[str, Any] = min_till_now, max_till_now
__lowerCamelCase : Optional[Any] = max(SCREAMING_SNAKE_CASE__ , max_till_now * number )
__lowerCamelCase : Optional[Any] = min(SCREAMING_SNAKE_CASE__ , min_till_now * number )
# update the maximum product found till now
__lowerCamelCase : int = max(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
return max_prod
| 353 |
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf
if is_tf_available():
import tensorflow as tf
from tensorflow.python.eager import context
from tensorflow.python.framework import ops
from transformers import GradientAccumulator, create_optimizer
@require_tf
class A_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self: Optional[int] , a: int , a: List[Any] , a: Any ):
self.assertEqual(len(a ) , len(a ) )
for a, b in zip(a , a ):
self.assertAlmostEqual(a , a , delta=a )
def _snake_case ( self: Union[str, Any] ):
__lowerCamelCase : Union[str, Any] = GradientAccumulator()
accumulator([tf.constant([1.0, 2.0] )] )
accumulator([tf.constant([-2.0, 1.0] )] )
accumulator([tf.constant([-1.0, 2.0] )] )
with self.assertRaises(a ):
accumulator([tf.constant([1.0, 1.0] ), tf.constant([2.0, 2.0] )] )
self.assertEqual(accumulator.step , 3 )
self.assertEqual(len(accumulator.gradients ) , 1 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [-2.0, 5.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
self.assertListAlmostEqual(accumulator.gradients[0].numpy().tolist() , [0.0, 0.0] , tol=1e-2 )
def _snake_case ( self: str ):
__lowerCamelCase : Any = None
ops.enable_eager_execution_internal()
__lowerCamelCase : List[str] = tf.config.list_physical_devices('CPU' )
if len(a ) == 1:
tf.config.set_logical_device_configuration(
physical_devices[0] , [tf.config.LogicalDeviceConfiguration(), tf.config.LogicalDeviceConfiguration()] )
__lowerCamelCase : Dict = tf.config.list_logical_devices(device_type='CPU' )
__lowerCamelCase : List[Any] = tf.distribute.MirroredStrategy(devices=devices[:2] )
with strategy.scope():
__lowerCamelCase : Optional[Any] = GradientAccumulator()
__lowerCamelCase : int = tf.Variable([4.0, 3.0] )
__lowerCamelCase , __lowerCamelCase : Any = create_optimizer(5e-5 , 10 , 5 )
__lowerCamelCase : Any = tf.Variable([0.0, 0.0] , trainable=a )
def accumulate_on_replica(a: str ):
accumulator([gradient] )
def apply_on_replica():
optimizer.apply_gradients(list(zip(accumulator.gradients , [variable] ) ) )
@tf.function
def accumulate(a: List[Any] , a: List[str] ):
with strategy.scope():
__lowerCamelCase : Optional[int] = strategy.experimental_local_results(a )
local_variables[0].assign(a )
local_variables[1].assign(a )
strategy.run(a , args=(gradient_placeholder,) )
@tf.function
def apply_grad():
with strategy.scope():
strategy.run(a )
def _check_local_values(a: str , a: Union[str, Any] ):
__lowerCamelCase : Optional[Any] = strategy.experimental_local_results(accumulator._gradients[0] )
self.assertListAlmostEqual(values[0].value() , a , tol=1e-2 )
self.assertListAlmostEqual(values[1].value() , a , tol=1e-2 )
accumulate([1.0, 2.0] , [-1.0, 1.0] )
accumulate([3.0, -1.0] , [-1.0, -1.0] )
accumulate([-2.0, 2.0] , [3.0, -2.0] )
self.assertEqual(accumulator.step , 3 )
_check_local_values([2.0, 3.0] , [1.0, -2.0] )
apply_grad()
self.assertListAlmostEqual(variable.value() , [4.0, 3.0] , tol=1e-2 )
accumulator.reset()
self.assertEqual(accumulator.step , 0 )
_check_local_values([0.0, 0.0] , [0.0, 0.0] )
| 194 | 0 |
'''simple docstring'''
import warnings
from ..trainer import Trainer
from ..utils import logging
A__ : List[Any] =logging.get_logger(__name__)
class UpperCAmelCase ( snake_case_ ):
def __init__( self : Tuple , __snake_case : int=None , **__snake_case : Any ) -> Dict:
warnings.warn(
"""`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` """
"""instead.""" , __snake_case , )
super().__init__(args=__snake_case , **__snake_case )
| 70 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowercase : Tuple = {
"configuration_xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMOnnxConfig"],
"tokenization_xlm": ["XLMTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : int = [
"XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLMForMultipleChoice",
"XLMForQuestionAnswering",
"XLMForQuestionAnsweringSimple",
"XLMForSequenceClassification",
"XLMForTokenClassification",
"XLMModel",
"XLMPreTrainedModel",
"XLMWithLMHeadModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Tuple = [
"TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLMForMultipleChoice",
"TFXLMForQuestionAnsweringSimple",
"TFXLMForSequenceClassification",
"TFXLMForTokenClassification",
"TFXLMMainLayer",
"TFXLMModel",
"TFXLMPreTrainedModel",
"TFXLMWithLMHeadModel",
]
if TYPE_CHECKING:
from .configuration_xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMOnnxConfig
from .tokenization_xlm import XLMTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlm import (
XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMPreTrainedModel,
XLMWithLMHeadModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlm import (
TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLMForMultipleChoice,
TFXLMForQuestionAnsweringSimple,
TFXLMForSequenceClassification,
TFXLMForTokenClassification,
TFXLMMainLayer,
TFXLMModel,
TFXLMPreTrainedModel,
TFXLMWithLMHeadModel,
)
else:
import sys
lowercase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 42 | 0 |
"""simple docstring"""
from typing import Any
class UpperCamelCase_ :
def __init__( self , snake_case__ ) -> int:
"""simple docstring"""
UpperCAmelCase = data
UpperCAmelCase = None
def __repr__( self ) -> str:
"""simple docstring"""
return f'''Node({self.data})'''
class UpperCamelCase_ :
def __init__( self ) -> Optional[int]:
"""simple docstring"""
UpperCAmelCase = None
def __iter__( self ) -> Any:
"""simple docstring"""
UpperCAmelCase = self.head
while node:
yield node.data
UpperCAmelCase = node.next
def __len__( self ) -> int:
"""simple docstring"""
return sum(1 for _ in self )
def __repr__( self ) -> str:
"""simple docstring"""
return "->".join([str(snake_case__ ) for item in self] )
def __getitem__( self , snake_case__ ) -> Any:
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("""list index out of range.""" )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self , snake_case__ , snake_case__ ) -> None:
"""simple docstring"""
if not 0 <= index < len(self ):
raise ValueError("""list index out of range.""" )
UpperCAmelCase = self.head
for _ in range(snake_case__ ):
UpperCAmelCase = current.next
UpperCAmelCase = data
def UpperCamelCase_ ( self , snake_case__ ) -> None:
"""simple docstring"""
self.insert_nth(len(self ) , snake_case__ )
def UpperCamelCase_ ( self , snake_case__ ) -> None:
"""simple docstring"""
self.insert_nth(0 , snake_case__ )
def UpperCamelCase_ ( self , snake_case__ , snake_case__ ) -> None:
"""simple docstring"""
if not 0 <= index <= len(self ):
raise IndexError("""list index out of range""" )
UpperCAmelCase = Node(snake_case__ )
if self.head is None:
UpperCAmelCase = new_node
elif index == 0:
UpperCAmelCase = self.head # link new_node to head
UpperCAmelCase = new_node
else:
UpperCAmelCase = self.head
for _ in range(index - 1 ):
UpperCAmelCase = temp.next
UpperCAmelCase = temp.next
UpperCAmelCase = new_node
def UpperCamelCase_ ( self ) -> None: # print every node data
"""simple docstring"""
print(self )
def UpperCamelCase_ ( self ) -> Any:
"""simple docstring"""
return self.delete_nth(0 )
def UpperCamelCase_ ( self ) -> Any: # delete from tail
"""simple docstring"""
return self.delete_nth(len(self ) - 1 )
def UpperCamelCase_ ( self , snake_case__ = 0 ) -> Any:
"""simple docstring"""
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError("""List index out of range.""" )
UpperCAmelCase = self.head # default first node
if index == 0:
UpperCAmelCase = self.head.next
else:
UpperCAmelCase = self.head
for _ in range(index - 1 ):
UpperCAmelCase = temp.next
UpperCAmelCase = temp.next
UpperCAmelCase = temp.next.next
return delete_node.data
def UpperCamelCase_ ( self ) -> bool:
"""simple docstring"""
return self.head is None
def UpperCamelCase_ ( self ) -> None:
"""simple docstring"""
UpperCAmelCase = None
UpperCAmelCase = self.head
while current:
# Store the current node's next node.
UpperCAmelCase = current.next
# Make the current node's next point backwards
UpperCAmelCase = prev
# Make the previous node be the current node
UpperCAmelCase = current
# Make the current node the next node (to progress iteration)
UpperCAmelCase = next_node
# Return prev in order to put the head at the end
UpperCAmelCase = prev
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = LinkedList()
assert linked_list.is_empty() is True
assert str(lowerCAmelCase ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(lowerCAmelCase ) == i
linked_list.insert_nth(lowerCAmelCase , i + 1 )
assert str(lowerCAmelCase ) == "->".join(str(lowerCAmelCase ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(lowerCAmelCase ) == "->".join(str(lowerCAmelCase ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(lowerCAmelCase ) == 9
assert str(lowerCAmelCase ) == "->".join(str(lowerCAmelCase ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
UpperCAmelCase = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(lowerCAmelCase ) == "->".join(str(lowerCAmelCase ) for i in range(-8 , 1 ) )
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = [
-9,
100,
Node(77345112 ),
"""dlrow olleH""",
7,
5555,
0,
-1_92.5_55_55,
"""Hello, world!""",
77.9,
Node(10 ),
None,
None,
12.20,
]
UpperCAmelCase = LinkedList()
for i in test_input:
linked_list.insert_tail(lowerCAmelCase )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(lowerCAmelCase ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
UpperCAmelCase = linked_list.delete_head()
assert result == -9
assert (
str(lowerCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
UpperCAmelCase = linked_list.delete_tail()
assert result == 12.2
assert (
str(lowerCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
UpperCAmelCase = linked_list.delete_nth(10 )
assert result is None
assert (
str(lowerCAmelCase ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node("""Hello again, world!""" ) )
assert (
str(lowerCAmelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(lowerCAmelCase )
assert (
str(lowerCAmelCase )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(lowerCAmelCase )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def _lowerCAmelCase ( ):
'''simple docstring'''
from doctest import testmod
testmod()
UpperCAmelCase = LinkedList()
linked_list.insert_head(input("""Inserting 1st at head """ ).strip() )
linked_list.insert_head(input("""Inserting 2nd at head """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
linked_list.insert_tail(input("""\nInserting 1st at tail """ ).strip() )
linked_list.insert_tail(input("""Inserting 2nd at tail """ ).strip() )
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nDelete head""" )
linked_list.delete_head()
print("""Delete tail""" )
linked_list.delete_tail()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nReverse linked list""" )
linked_list.reverse()
print("""\nPrint list:""" )
linked_list.print_list()
print("""\nString representation of linked list:""" )
print(lowerCAmelCase )
print("""\nReading/changing Node data using indexing:""" )
print(F'''Element at Position 1: {linked_list[1]}''' )
UpperCAmelCase = input("""Enter New Value: """ ).strip()
print("""New list:""" )
print(lowerCAmelCase )
print(F'''length of linked_list is : {len(lowerCAmelCase )}''' )
if __name__ == "__main__":
main()
| 248 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase_ : str = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[int] = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[Any] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Tuple = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowerCAmelCase_ : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 248 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class lowerCamelCase__( __lowerCamelCase , unittest.TestCase):
UpperCAmelCase__ : Dict = ShapEPipeline
UpperCAmelCase__ : Any = ['prompt']
UpperCAmelCase__ : List[Any] = ['prompt']
UpperCAmelCase__ : str = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
UpperCAmelCase__ : Dict = False
@property
def lowerCAmelCase__ ( self: int ):
return 32
@property
def lowerCAmelCase__ ( self: Union[str, Any] ):
return 32
@property
def lowerCAmelCase__ ( self: int ):
return self.time_input_dim * 4
@property
def lowerCAmelCase__ ( self: Optional[Any] ):
return 8
@property
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
return tokenizer
@property
def lowerCAmelCase__ ( self: str ):
torch.manual_seed(0 )
__lowerCamelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModelWithProjection(UpperCamelCase_ )
@property
def lowerCAmelCase__ ( self: List[str] ):
torch.manual_seed(0 )
__lowerCamelCase = {
"""num_attention_heads""": 2,
"""attention_head_dim""": 16,
"""embedding_dim""": self.time_input_dim,
"""num_embeddings""": 32,
"""embedding_proj_dim""": self.text_embedder_hidden_size,
"""time_embed_dim""": self.time_embed_dim,
"""num_layers""": 1,
"""clip_embed_dim""": self.time_input_dim * 2,
"""additional_embeddings""": 0,
"""time_embed_act_fn""": """gelu""",
"""norm_in_type""": """layer""",
"""encoder_hid_proj_type""": None,
"""added_emb_type""": None,
}
__lowerCamelCase = PriorTransformer(**UpperCamelCase_ )
return model
@property
def lowerCAmelCase__ ( self: Dict ):
torch.manual_seed(0 )
__lowerCamelCase = {
"""param_shapes""": (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
"""d_latent""": self.time_input_dim,
"""d_hidden""": self.renderer_dim,
"""n_output""": 12,
"""background""": (
0.1,
0.1,
0.1,
),
}
__lowerCamelCase = ShapERenderer(**UpperCamelCase_ )
return model
def lowerCAmelCase__ ( self: str ):
__lowerCamelCase = self.dummy_prior
__lowerCamelCase = self.dummy_text_encoder
__lowerCamelCase = self.dummy_tokenizer
__lowerCamelCase = self.dummy_renderer
__lowerCamelCase = HeunDiscreteScheduler(
beta_schedule="""exp""" , num_train_timesteps=10_24 , prediction_type="""sample""" , use_karras_sigmas=UpperCamelCase_ , clip_sample=UpperCamelCase_ , clip_sample_range=1.0 , )
__lowerCamelCase = {
"""prior""": prior,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""renderer""": renderer,
"""scheduler""": scheduler,
}
return components
def lowerCAmelCase__ ( self: str , UpperCamelCase_: Dict , UpperCamelCase_: Optional[Any]=0 ):
if str(UpperCamelCase_ ).startswith("""mps""" ):
__lowerCamelCase = torch.manual_seed(UpperCamelCase_ )
else:
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(UpperCamelCase_ )
__lowerCamelCase = {
"""prompt""": """horse""",
"""generator""": generator,
"""num_inference_steps""": 1,
"""frame_size""": 32,
"""output_type""": """np""",
}
return inputs
def lowerCAmelCase__ ( self: List[str] ):
__lowerCamelCase = """cpu"""
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**UpperCamelCase_ )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = pipe(**self.get_dummy_inputs(UpperCamelCase_ ) )
__lowerCamelCase = output.images[0]
__lowerCamelCase = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCamelCase = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCAmelCase__ ( self: List[str] ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def lowerCAmelCase__ ( self: Optional[int] ):
__lowerCamelCase = torch_device == """cpu"""
__lowerCamelCase = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=UpperCamelCase_ , relax_max_difference=UpperCamelCase_ , )
def lowerCAmelCase__ ( self: Union[str, Any] ):
__lowerCamelCase = self.get_dummy_components()
__lowerCamelCase = self.pipeline_class(**UpperCamelCase_ )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = 1
__lowerCamelCase = 2
__lowerCamelCase = self.get_dummy_inputs(UpperCamelCase_ )
for key in inputs.keys():
if key in self.batch_params:
__lowerCamelCase = batch_size * [inputs[key]]
__lowerCamelCase = pipe(**UpperCamelCase_ , num_images_per_prompt=UpperCamelCase_ )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class lowerCamelCase__( unittest.TestCase):
def lowerCAmelCase__ ( self: List[Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCAmelCase__ ( self: Tuple ):
__lowerCamelCase = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/shap_e/test_shap_e_np_out.npy""" )
__lowerCamelCase = ShapEPipeline.from_pretrained("""openai/shap-e""" )
__lowerCamelCase = pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
__lowerCamelCase = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
__lowerCamelCase = pipe(
"""a shark""" , generator=UpperCamelCase_ , guidance_scale=15.0 , num_inference_steps=64 , frame_size=64 , output_type="""np""" , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(UpperCamelCase_ , UpperCamelCase_ )
| 12 |
import json
import os
from datetime import date
from pathlib import Path
from tabulate import DataRow, TableFormat, tabulate
UpperCAmelCase_ = TableFormat(
lineabove=None,
linebelowheader=None,
linebetweenrows=None,
linebelow=None,
headerrow=DataRow('', '|', '|'),
datarow=DataRow('', '|', '|'),
padding=1,
with_header_hide=None,
)
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = {'type': 'section', 'text': {'type': 'plain_text', 'text': 'No failed tests! 🤗', 'emoji': True}}
UpperCAmelCase_ = [
{
'type': 'header',
'text': {
'type': 'plain_text',
'text': f"""🤗 Accelerate nightly {os.environ.get("TEST_TYPE", "")} test results""",
'emoji': True,
},
}
]
UpperCAmelCase_ = 0
for log in Path().glob('*.log'):
UpperCAmelCase_ = 0
with open(log, 'r') as f:
for line in f:
UpperCAmelCase_ = json.loads(line)
if line.get('nodeid', '') != "":
UpperCAmelCase_ = line['nodeid']
if line.get('duration', None) is not None:
UpperCAmelCase_ = f"""{line["duration"]:.4f}"""
if line.get('outcome', '') == "failed":
section_num_failed += 1
failed.append([test, duration, log.name.split('_')[0]])
total_num_failed += 1
group_info.append([str(log), section_num_failed, failed])
UpperCAmelCase_ = []
log.unlink()
UpperCAmelCase_ = ''
UpperCAmelCase_ = []
if total_num_failed > 0:
for name, num_failed, failed_tests in group_info:
if num_failed > 0:
if num_failed == 1:
message += f"*{name[1:]}: {num_failed} failed test*\n"
else:
message += f"*{name[1:]}: {num_failed} failed tests*\n"
UpperCAmelCase_ = []
UpperCAmelCase_ = {}
for test in failed_tests:
UpperCAmelCase_ = test[0].split('::')
UpperCAmelCase_ = data[0].split('/')[-1]
if data[0] not in filesafailed:
UpperCAmelCase_ = [data[1:]]
else:
filesafailed[data[0]] += [data[1:]]
failed_table.append(data)
UpperCAmelCase_ = [test[0] for test in failed_table]
UpperCAmelCase_ = list(set(files))
# Count number of instances in failed_tests
UpperCAmelCase_ = []
for file in individual_files:
table.append([file, len(filesafailed[file])])
UpperCAmelCase_ = tabulate(
table,
headers=['Test Location', 'Num Failed'],
tablefmt=hf_table_format,
stralign='right',
)
message += f"\n```\n{failed_table}\n```"
all_filesafailed.append(filesafailed)
if len(message) > 3_000:
UpperCAmelCase_ = 'Too many failed tests, please see the full report in the Action results.'
UpperCAmelCase_ = len(err) + 10
UpperCAmelCase_ = message[: 3_000 - offset] + f"""\n...\n```\n{err}"""
print(f"""### {message}""")
else:
UpperCAmelCase_ = 'No failed tests! 🤗'
print(f"""## {message}""")
payload.append(no_error_payload)
if os.environ.get('TEST_TYPE', '') != "":
from slack_sdk import WebClient
UpperCAmelCase_ = WebClient(token=os.environ['SLACK_API_TOKEN'])
if message != "No failed tests! 🤗":
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': message,
},
}
payload.append(md_report)
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': '*For more details:*',
},
'accessory': {
'type': 'button',
'text': {
'type': 'plain_text',
'text': 'Check Action results',
'emoji': True,
},
'url': f"""https://github.com/{os.environ["GITHUB_REPOSITORY"]}/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
payload.append(action_button)
UpperCAmelCase_ = {
'type': 'context',
'elements': [
{
'type': 'plain_text',
'text': f"""Nightly {os.environ.get("TEST_TYPE")} test results for {date.today()}""",
}
],
}
payload.append(date_report)
UpperCAmelCase_ = client.chat_postMessage(channel='#accelerate-ci-daily', text=message, blocks=payload)
UpperCAmelCase_ = response.data['ts']
for failed_file in all_filesafailed:
for test_location, test_failures in failed_file.items():
# Keep only the first instance of the test name
UpperCAmelCase_ = ''
for i, row in enumerate(test_failures):
if row[0] != test_class:
UpperCAmelCase_ = row[0]
else:
UpperCAmelCase_ = ''
UpperCAmelCase_ = {
'type': 'section',
'text': {
'type': 'mrkdwn',
'text': f"""Test location: {test_location}\n```\n{tabulate(test_failures, headers=["Class", "Test"], tablefmt=hf_table_format, stralign="right")}\n```""",
},
}
client.chat_postMessage(
channel='#accelerate-ci-daily',
thread_ts=ts,
blocks=[payload],
)
| 12 | 1 |
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class __snake_case ( unittest.TestCase , a ):
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = load_tool('''text-to-speech''')
self.tool.setup()
def lowerCamelCase ( self : int):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = self.tool('''hey''')
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
def lowerCamelCase ( self : Any):
"""simple docstring"""
torch.manual_seed(0)
UpperCAmelCase_ = self.tool('''hey''')
UpperCAmelCase_ = result.to_raw()
self.assertTrue(
torch.allclose(
resulting_tensor[:3] , torch.tensor([-0.0_0_0_5_9_6_6_6_6_8_8_3_2_1_1_5_8_2_9, -0.0_0_0_3_6_5_7_6_4_0_1_9_0_7_9_5_0_6_4, -0.0_0_0_1_3_4_3_9_5_0_2_7_9_9_8_8_3_4_8_5]) , ))
| 7 |
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int] , _snake_case : Union[str, Any]):
"""simple docstring"""
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['''bs'''] , model_result['''ss''']):
UpperCAmelCase_ = model_result['''result'''][batch_size][sequence_length]
self.assertIsNotNone(_snake_case)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : str):
"""simple docstring"""
UpperCAmelCase_ = '''sgugger/tiny-distilbert-classification'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , only_pretrain_model=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , torchscript=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Cant do half precision''')
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , fpaa=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
# set architectures equal to `None`
UpperCAmelCase_ = None
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
@unittest.skipIf(torch_device == '''cpu''' , '''Can\'t do half precision''')
def lowerCamelCase ( self : Optional[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , fpaa=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result)
self.check_results_dict_not_empty(results.memory_inference_result)
def lowerCamelCase ( self : Dict):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tinier_bart'''
UpperCAmelCase_ = AutoConfig.from_pretrained(_snake_case)
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case , configs=[config])
UpperCAmelCase_ = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result)
self.check_results_dict_not_empty(results.memory_train_result)
def lowerCamelCase ( self : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , save_to_csv=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_snake_case , '''inf_time.csv''') , train_memory_csv_file=os.path.join(_snake_case , '''train_mem.csv''') , inference_memory_csv_file=os.path.join(_snake_case , '''inf_mem.csv''') , train_time_csv_file=os.path.join(_snake_case , '''train_time.csv''') , env_info_csv_file=os.path.join(_snake_case , '''env.csv''') , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
benchmark.run()
self.assertTrue(Path(os.path.join(_snake_case , '''inf_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_time.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''inf_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''train_mem.csv''')).exists())
self.assertTrue(Path(os.path.join(_snake_case , '''env.csv''')).exists())
def lowerCamelCase ( self : List[str]):
"""simple docstring"""
UpperCAmelCase_ = '''sshleifer/tiny-gpt2'''
def _check_summary_is_not_empty(_snake_case : Tuple):
self.assertTrue(hasattr(_snake_case , '''sequential'''))
self.assertTrue(hasattr(_snake_case , '''cumulative'''))
self.assertTrue(hasattr(_snake_case , '''current'''))
self.assertTrue(hasattr(_snake_case , '''total'''))
with tempfile.TemporaryDirectory() as tmp_dir:
UpperCAmelCase_ = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_snake_case , inference=_snake_case , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_snake_case , '''log.txt''') , log_print=_snake_case , trace_memory_line_by_line=_snake_case , multi_process=_snake_case , )
UpperCAmelCase_ = PyTorchBenchmark(_snake_case)
UpperCAmelCase_ = benchmark.run()
_check_summary_is_not_empty(result.inference_summary)
_check_summary_is_not_empty(result.train_summary)
self.assertTrue(Path(os.path.join(_snake_case , '''log.txt''')).exists())
| 7 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import is_tensorflow_text_available, is_tf_available
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
from ..test_modeling_tf_common import floats_tensor
from .test_framework_agnostic import GenerationIntegrationTestsMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
AutoTokenizer,
TFAutoModelForCausalLM,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSpeechSeqaSeq,
TFAutoModelForVisionaSeq,
TFBartForConditionalGeneration,
TFLogitsProcessorList,
TFMinLengthLogitsProcessor,
tf_top_k_top_p_filtering,
)
if is_tensorflow_text_available():
import tensorflow_text as text
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[
[
8.2_22_09_91, # 3rd highest value; idx. 0
-0.5_62_00_44,
5.23_22_97_52,
4.0_38_63_93,
-6.8_79_83_78,
-0.54_78_58_02,
-3.2_01_21_53,
2.92_77_71_76,
1.88_17_19_53,
7.35_34_12_76, # 5th highest value; idx. 9
8.43_20_78_33, # 2nd highest value; idx. 10
-9.85_71_18_36,
-5.96_20_92_36,
-1.13_03_91_61,
-7.1_11_52_94,
-0.8_36_96_33,
-5.3_18_64_08,
7.06_42_74_07,
0.81_36_93_44,
-0.82_02_38_17,
-5.9_17_97_96,
0.58_81_34_43,
-6.99_77_84_38,
4.71_55_11_89,
-0.18_77_16_37,
7.44_02_07_59, # 4th highest value; idx. 25
9.38_45_09_87, # 1st highest value; idx. 26
2.12_66_29_41,
-9.32_56_20_38,
2.35_65_25_22,
], # cummulative prob of 5 highest values <= 0.6
[
0.58_42_55_18,
4.53_13_92_38,
-5.57_51_04_64,
-6.28_03_06_99,
-7.19_52_95_03,
-4.02_12_25_51,
1.39_33_70_37,
-6.06_70_70_57,
1.59_48_05_17,
-9.64_31_19,
0.03_90_77_99,
0.67_23_17_62,
-8.88_20_67_26,
6.27_11_59_22, # 4th highest value; idx. 13
2.28_52_07_23,
4.82_76_75_06,
4.30_42_13_68,
8.8_27_53_13, # 2nd highest value; idx. 17
5.44_02_99_58, # 5th highest value; idx. 18
-4.4_73_57_94,
7.38_57_95_36, # 3rd highest value; idx. 20
-2.91_05_16_63,
2.61_94_60_77,
-2.5_67_47_62,
-9.48_95_93_02,
-4.02_92_26_45,
-1.35_41_69_18,
9.67_70_23_23, # 1st highest value; idx. 27
-5.89_47_85_53,
1.85_37_04_67,
], # cummulative prob of 5 highest values <= 0.6
] , dtype=tf.floataa , )
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 1_0], [0, 2_5], [0, 2_6], [1, 1_3], [1, 1_7], [1, 1_8], [1, 2_0], [1, 2_7]] , dtype=tf.intaa , ) # expected non filtered idx as noted above
__SCREAMING_SNAKE_CASE = tf.convert_to_tensor(
[8.22_20_99, 7.3_53_41_26, 8.43_20_78, 7.4_40_20_75, 9.3_84_51, 6.27_11_59, 8.82_75_31, 5.4_40_29_95, 7.3_85_79_56, 9.67_70_23] , dtype=tf.floataa , ) # expected non filtered values as noted above
__SCREAMING_SNAKE_CASE = tf_top_k_top_p_filtering(lowerCAmelCase__ , top_k=1_0 , top_p=0.6 , min_tokens_to_keep=4)
__SCREAMING_SNAKE_CASE = output[output != -float("""inf""")]
__SCREAMING_SNAKE_CASE = tf.cast(
tf.where(tf.not_equal(lowerCAmelCase__ , tf.constant(-float("""inf""") , dtype=tf.floataa))) , dtype=tf.intaa , )
tf.debugging.assert_near(lowerCAmelCase__ , lowerCAmelCase__ , rtol=1E-12)
tf.debugging.assert_equal(lowerCAmelCase__ , lowerCAmelCase__)
@require_tf
class SCREAMING_SNAKE_CASE_ ( unittest.TestCase , __a ):
"""simple docstring"""
if is_tf_available():
__lowercase : Union[str, Any] = {
"""AutoModelForCausalLM""": TFAutoModelForCausalLM,
"""AutoModelForSpeechSeq2Seq""": TFAutoModelForSpeechSeqaSeq,
"""AutoModelForSeq2SeqLM""": TFAutoModelForSeqaSeqLM,
"""AutoModelForVision2Seq""": TFAutoModelForVisionaSeq,
"""LogitsProcessorList""": TFLogitsProcessorList,
"""MinLengthLogitsProcessor""": TFMinLengthLogitsProcessor,
"""create_tensor_fn""": tf.convert_to_tensor,
"""floats_tensor""": floats_tensor,
"""return_tensors""": """tf""",
}
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 2
class SCREAMING_SNAKE_CASE_ ( tf.Module ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__):
super(lowerCAmelCase__ , self).__init__()
__SCREAMING_SNAKE_CASE = model
@tf.function(
input_signature=(
tf.TensorSpec((None, input_length) , tf.intaa , name="""input_ids"""),
tf.TensorSpec((None, input_length) , tf.intaa , name="""attention_mask"""),
) , jit_compile=lowerCAmelCase__ , )
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.model.generate(
input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , max_new_tokens=lowerCAmelCase__ , return_dict_in_generate=lowerCAmelCase__ , )
return {"sequences": outputs["sequences"]}
__SCREAMING_SNAKE_CASE = [[2, 0], [1_0_2, 1_0_3]]
__SCREAMING_SNAKE_CASE = [[1, 0], [1, 1]]
__SCREAMING_SNAKE_CASE = DummyModel(model=lowerCAmelCase__)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(lowerCAmelCase__ , lowerCAmelCase__ , signatures={"""serving_default""": dummy_model.serving})
__SCREAMING_SNAKE_CASE = tf.saved_model.load(lowerCAmelCase__).signatures["""serving_default"""]
for batch_size in range(1 , len(lowerCAmelCase__) + 1):
__SCREAMING_SNAKE_CASE = {
"""input_ids""": tf.constant(dummy_input_ids[:batch_size]),
"""attention_mask""": tf.constant(dummy_attention_masks[:batch_size]),
}
__SCREAMING_SNAKE_CASE = serving_func(**lowerCAmelCase__)["""sequences"""]
__SCREAMING_SNAKE_CASE = test_model.generate(**lowerCAmelCase__ , max_new_tokens=lowerCAmelCase__)
tf.debugging.assert_equal(lowerCAmelCase__ , lowerCAmelCase__)
@slow
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 2
class SCREAMING_SNAKE_CASE_ ( tf.Module ):
"""simple docstring"""
def __init__( self , lowerCAmelCase__):
super(lowerCAmelCase__ , self).__init__()
__SCREAMING_SNAKE_CASE = model
@tf.function(
input_signature=(
tf.TensorSpec((batch_size, None) , tf.intaa , name="""input_ids"""),
tf.TensorSpec((batch_size, None) , tf.intaa , name="""attention_mask"""),
) , jit_compile=lowerCAmelCase__ , )
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.model.generate(
input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , max_new_tokens=lowerCAmelCase__ , return_dict_in_generate=lowerCAmelCase__ , )
return {"sequences": outputs["sequences"]}
__SCREAMING_SNAKE_CASE = [[2], [1_0_2, 1_0_3]]
__SCREAMING_SNAKE_CASE = [[1], [1, 1]]
__SCREAMING_SNAKE_CASE = DummyModel(model=lowerCAmelCase__)
with tempfile.TemporaryDirectory() as tmp_dir:
tf.saved_model.save(lowerCAmelCase__ , lowerCAmelCase__ , signatures={"""serving_default""": dummy_model.serving})
__SCREAMING_SNAKE_CASE = tf.saved_model.load(lowerCAmelCase__).signatures["""serving_default"""]
for input_row in range(len(lowerCAmelCase__)):
__SCREAMING_SNAKE_CASE = {
"""input_ids""": tf.constant([dummy_input_ids[input_row]]),
"""attention_mask""": tf.constant([dummy_attention_masks[input_row]]),
}
__SCREAMING_SNAKE_CASE = serving_func(**lowerCAmelCase__)["""sequences"""]
__SCREAMING_SNAKE_CASE = test_model.generate(**lowerCAmelCase__ , max_new_tokens=lowerCAmelCase__)
tf.debugging.assert_equal(lowerCAmelCase__ , lowerCAmelCase__)
@slow
@require_tensorflow_text
def snake_case_ ( self):
with tempfile.TemporaryDirectory() as tmp_dir:
# file needed to load the TF tokenizer
hf_hub_download(repo_id="""google/flan-t5-small""" , filename="""spiece.model""" , local_dir=lowerCAmelCase__)
class SCREAMING_SNAKE_CASE_ ( tf.keras.layers.Layer ):
"""simple docstring"""
def __init__( self):
super().__init__()
__SCREAMING_SNAKE_CASE = text.SentencepieceTokenizer(
model=tf.io.gfile.GFile(os.path.join(lowerCAmelCase__ , """spiece.model""") , """rb""").read())
__SCREAMING_SNAKE_CASE = TFAutoModelForSeqaSeqLM.from_pretrained("""hf-internal-testing/tiny-random-t5""")
def snake_case_ ( self , lowerCAmelCase__ , *lowerCAmelCase__ , **lowerCAmelCase__):
__SCREAMING_SNAKE_CASE = self.tokenizer.tokenize(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE ,__SCREAMING_SNAKE_CASE = text.pad_model_inputs(
lowerCAmelCase__ , max_seq_length=6_4 , pad_value=self.model.config.pad_token_id)
__SCREAMING_SNAKE_CASE = self.model.generate(input_ids=lowerCAmelCase__ , attention_mask=lowerCAmelCase__)
return self.tokenizer.detokenize(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = CompleteSentenceTransformer()
__SCREAMING_SNAKE_CASE = tf.keras.layers.Input(shape=(1,) , dtype=tf.string , name="""inputs""")
__SCREAMING_SNAKE_CASE = complete_model(lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = tf.keras.Model(lowerCAmelCase__ , lowerCAmelCase__)
keras_model.save(lowerCAmelCase__)
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = {
"""do_sample""": True,
"""num_beams""": 1,
"""top_p""": 0.7,
"""top_k""": 1_0,
"""temperature""": 0.7,
}
__SCREAMING_SNAKE_CASE = 1_4
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
__SCREAMING_SNAKE_CASE = """Hello, my dog is cute and"""
__SCREAMING_SNAKE_CASE = tokenizer(lowerCAmelCase__ , return_tensors="""tf""")
__SCREAMING_SNAKE_CASE = TFAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-random-gpt2""")
__SCREAMING_SNAKE_CASE = 6_3_8
# forces the generation to happen on CPU, to avoid GPU-related quirks
with tf.device(""":/CPU:0"""):
tf.random.set_seed(0)
__SCREAMING_SNAKE_CASE = model.generate(**lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__)
self.assertTrue(expectation == len(generated_tokens[0]))
__SCREAMING_SNAKE_CASE = [6_3_8, 1_9_8]
with tf.device(""":/CPU:0"""):
tf.random.set_seed(0)
__SCREAMING_SNAKE_CASE = model.generate(**lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , **lowerCAmelCase__)
self.assertTrue(expectation == len(generated_tokens[0]))
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-random-bart""")
__SCREAMING_SNAKE_CASE = """Hugging Face is a technology company based in New York and Paris."""
__SCREAMING_SNAKE_CASE = bart_tokenizer(lowerCAmelCase__ , return_tensors="""tf""").input_ids
__SCREAMING_SNAKE_CASE = TFBartForConditionalGeneration.from_pretrained("""hf-internal-testing/tiny-random-bart""")
__SCREAMING_SNAKE_CASE = bart_model.generate(lowerCAmelCase__).numpy()
class SCREAMING_SNAKE_CASE_ ( __a ):
"""simple docstring"""
def snake_case_ ( self , lowerCAmelCase__ , lowerCAmelCase__=None , **lowerCAmelCase__):
return super().call(lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = FakeBart.from_pretrained("""hf-internal-testing/tiny-random-bart""")
__SCREAMING_SNAKE_CASE = bart_model.generate(lowerCAmelCase__ , foo="""bar""").numpy()
self.assertTrue(np.array_equal(lowerCAmelCase__ , lowerCAmelCase__))
class SCREAMING_SNAKE_CASE_ ( bart_model.model.encoder.__class__ ):
"""simple docstring"""
def snake_case_ ( self , lowerCAmelCase__ , **lowerCAmelCase__):
return super().call(lowerCAmelCase__ , **lowerCAmelCase__)
__SCREAMING_SNAKE_CASE = FakeEncoder(bart_model.config , bart_model.model.shared)
__SCREAMING_SNAKE_CASE = fake_encoder
# Normal generation still works (the output will be different because the encoder weights are different)
__SCREAMING_SNAKE_CASE = bart_model.generate(lowerCAmelCase__).numpy()
with self.assertRaises(lowerCAmelCase__):
# FakeEncoder.call() accepts **kwargs -> no filtering -> value error due to unexpected input "foo"
bart_model.generate(lowerCAmelCase__ , foo="""bar""")
| 100 | """simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : str=False , _UpperCamelCase : str=False ) -> Optional[Any]:
"""simple docstring"""
snake_case = 'backbone.' if is_semantic else ''
snake_case = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"""{prefix}blocks.{i}.norm1.weight""", f"""beit.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm1.bias""", f"""beit.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.weight""", f"""beit.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(f"""{prefix}blocks.{i}.attn.proj.bias""", f"""beit.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.weight""", f"""beit.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.norm2.bias""", f"""beit.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.weight""", f"""beit.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc1.bias""", f"""beit.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.weight""", f"""beit.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((f"""{prefix}blocks.{i}.mlp.fc2.bias""", f"""beit.encoder.layer.{i}.output.dense.bias""") )
# projection layer + position embeddings
rename_keys.extend(
[
(f"""{prefix}cls_token""", 'beit.embeddings.cls_token'),
(f"""{prefix}patch_embed.proj.weight""", 'beit.embeddings.patch_embeddings.projection.weight'),
(f"""{prefix}patch_embed.proj.bias""", 'beit.embeddings.patch_embeddings.projection.bias'),
(f"""{prefix}pos_embed""", 'beit.embeddings.position_embeddings'),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
('mask_token', 'beit.embeddings.mask_token'),
('norm.weight', 'layernorm.weight'),
('norm.bias', 'layernorm.bias'),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
('fc_norm.weight', 'beit.pooler.layernorm.weight'),
('fc_norm.bias', 'beit.pooler.layernorm.bias'),
('head.weight', 'classifier.weight'),
('head.bias', 'classifier.bias'),
] )
return rename_keys
def lowerCAmelCase__ ( _UpperCamelCase : List[str] , _UpperCamelCase : Any , _UpperCamelCase : Tuple=False , _UpperCamelCase : Union[str, Any]=False ) -> str:
"""simple docstring"""
for i in range(config.num_hidden_layers ):
snake_case = 'backbone.' if is_semantic else ''
# queries, keys and values
snake_case = state_dict.pop(f"""{prefix}blocks.{i}.attn.qkv.weight""" )
snake_case = state_dict.pop(f"""{prefix}blocks.{i}.attn.q_bias""" )
snake_case = state_dict.pop(f"""{prefix}blocks.{i}.attn.v_bias""" )
snake_case = in_proj_weight[
: config.hidden_size, :
]
snake_case = q_bias
snake_case = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case = in_proj_weight[
-config.hidden_size :, :
]
snake_case = v_bias
# gamma_1 and gamma_2
# we call them lambda because otherwise they are renamed when using .from_pretrained
snake_case = state_dict.pop(f"""{prefix}blocks.{i}.gamma_1""" )
snake_case = state_dict.pop(f"""{prefix}blocks.{i}.gamma_2""" )
snake_case = gamma_a
snake_case = gamma_a
def lowerCAmelCase__ ( _UpperCamelCase : Any , _UpperCamelCase : Optional[int] , _UpperCamelCase : Dict ) -> Dict:
"""simple docstring"""
snake_case = dct.pop(_UpperCamelCase )
snake_case = val
def lowerCAmelCase__ ( ) -> int:
"""simple docstring"""
snake_case = 'http://images.cocodataset.org/val2017/000000039769.jpg'
snake_case = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
@torch.no_grad()
def lowerCAmelCase__ ( _UpperCamelCase : List[Any] , _UpperCamelCase : Dict , _UpperCamelCase : Union[str, Any]=False ) -> Union[str, Any]:
"""simple docstring"""
snake_case = False if 'rvlcdip' in checkpoint_url else True
snake_case = BeitConfig(use_absolute_position_embeddings=_UpperCamelCase , use_mask_token=_UpperCamelCase )
# size of the architecture
if "large" in checkpoint_url or "dit-l" in checkpoint_url:
snake_case = 1_0_2_4
snake_case = 4_0_9_6
snake_case = 2_4
snake_case = 1_6
# labels
if "rvlcdip" in checkpoint_url:
snake_case = 1_6
snake_case = 'huggingface/label-files'
snake_case = 'rvlcdip-id2label.json'
snake_case = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='dataset' ) , 'r' ) )
snake_case = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
snake_case = idalabel
snake_case = {v: k for k, v in idalabel.items()}
# load state_dict of original model, remove and rename some keys
snake_case = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location='cpu' )['model']
snake_case = create_rename_keys(_UpperCamelCase , has_lm_head=_UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_q_k_v(_UpperCamelCase , _UpperCamelCase , has_lm_head=_UpperCamelCase )
# load HuggingFace model
snake_case = BeitForMaskedImageModeling(_UpperCamelCase ) if has_lm_head else BeitForImageClassification(_UpperCamelCase )
model.eval()
model.load_state_dict(_UpperCamelCase )
# Check outputs on an image
snake_case = BeitImageProcessor(
size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=_UpperCamelCase )
snake_case = prepare_img()
snake_case = image_processor(images=_UpperCamelCase , return_tensors='pt' )
snake_case = encoding['pixel_values']
snake_case = model(_UpperCamelCase )
snake_case = outputs.logits
# verify logits
snake_case = [1, 1_6] if 'rvlcdip' in checkpoint_url else [1, 1_9_6, 8_1_9_2]
assert logits.shape == torch.Size(_UpperCamelCase ), "Shape of logits not as expected"
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(_UpperCamelCase )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(_UpperCamelCase )
if push_to_hub:
if has_lm_head:
snake_case = 'dit-base' if 'base' in checkpoint_url else 'dit-large'
else:
snake_case = 'dit-base-finetuned-rvlcdip' if 'dit-b' in checkpoint_url else 'dit-large-finetuned-rvlcdip'
image_processor.push_to_hub(
repo_path_or_name=Path(_UpperCamelCase , _UpperCamelCase ) , organization='nielsr' , commit_message='Add image processor' , use_temp_dir=_UpperCamelCase , )
model.push_to_hub(
repo_path_or_name=Path(_UpperCamelCase , _UpperCamelCase ) , organization='nielsr' , commit_message='Add model' , use_temp_dir=_UpperCamelCase , )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
parser.add_argument(
"--checkpoint_url",
default="https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth",
type=str,
help="URL to the original PyTorch checkpoint (.pth file).",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 150 | 0 |
"""simple docstring"""
import math
def a__ ( lowerCAmelCase__ ):
UpperCAmelCase_ = []
UpperCAmelCase_ = 2
UpperCAmelCase_ = int(math.sqrt(lowerCAmelCase__ ) ) # Size of every segment
UpperCAmelCase_ = [True] * (end + 1)
UpperCAmelCase_ = []
while start <= end:
if temp[start] is True:
in_prime.append(lowerCAmelCase__ )
for i in range(start * start , end + 1 , lowerCAmelCase__ ):
UpperCAmelCase_ = False
start += 1
prime += in_prime
UpperCAmelCase_ = end + 1
UpperCAmelCase_ = min(2 * end , lowerCAmelCase__ )
while low <= n:
UpperCAmelCase_ = [True] * (high - low + 1)
for each in in_prime:
UpperCAmelCase_ = math.floor(low / each ) * each
if t < low:
t += each
for j in range(lowerCAmelCase__ , high + 1 , lowerCAmelCase__ ):
UpperCAmelCase_ = False
for j in range(len(lowerCAmelCase__ ) ):
if temp[j] is True:
prime.append(j + low )
UpperCAmelCase_ = high + 1
UpperCAmelCase_ = min(high + end , lowerCAmelCase__ )
return prime
print(sieve(10**6))
| 241 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowercase__ :
'''simple docstring'''
def __init__( self : Any , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any]=2 , _UpperCAmelCase : Any=True , _UpperCAmelCase : int=False , _UpperCAmelCase : str=10 , _UpperCAmelCase : int=3 , _UpperCAmelCase : Optional[int]=32 * 8 , _UpperCAmelCase : str=32 * 8 , _UpperCAmelCase : Any=4 , _UpperCAmelCase : List[Any]=64 , ) -> str:
'''simple docstring'''
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_auxiliary_loss
UpperCAmelCase_ = num_queries
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_size
UpperCAmelCase_ = max_size
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = hidden_dim
UpperCAmelCase_ = hidden_dim
def lowercase__ ( self : int ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_UpperCAmelCase )
UpperCAmelCase_ = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_UpperCAmelCase )
UpperCAmelCase_ = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_UpperCAmelCase ) > 0.5
).float()
UpperCAmelCase_ = (torch.rand((self.batch_size, self.num_labels) , device=_UpperCAmelCase ) > 0.5).long()
UpperCAmelCase_ = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase__ ( self : Any ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
UpperCAmelCase_ = self.num_queries
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = [1, 1, 1, 1]
UpperCAmelCase_ = self.num_channels
UpperCAmelCase_ = 64
UpperCAmelCase_ = 128
UpperCAmelCase_ = self.hidden_dim
UpperCAmelCase_ = self.hidden_dim
UpperCAmelCase_ = self.hidden_dim
return config
def lowercase__ ( self : Dict ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ = {"pixel_values": pixel_values, "pixel_mask": pixel_mask}
return config, inputs_dict
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ = output.encoder_hidden_states
UpperCAmelCase_ = output.pixel_decoder_hidden_states
UpperCAmelCase_ = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_UpperCAmelCase ) , config.decoder_layers )
def lowercase__ ( self : Tuple , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Any , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int]=False ) -> str:
'''simple docstring'''
with torch.no_grad():
UpperCAmelCase_ = MaskaFormerModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
UpperCAmelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_UpperCAmelCase , _UpperCAmelCase )
def lowercase__ ( self : Optional[Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
def comm_check_on_output(_UpperCAmelCase : List[Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
UpperCAmelCase_ = model(pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase )
UpperCAmelCase_ = model(_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
UpperCAmelCase_ = model(
pixel_values=_UpperCAmelCase , pixel_mask=_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
comm_check_on_output(_UpperCAmelCase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowercase__ ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
UpperCamelCase = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
UpperCamelCase = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
UpperCamelCase = False
def lowercase__ ( self : Tuple ) -> Tuple:
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=_UpperCAmelCase , has_text_modality=_UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def lowercase__ ( self : Optional[int] ) -> Dict:
'''simple docstring'''
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_UpperCAmelCase )
@unittest.skip(reason="Mask2Former does not use inputs_embeds" )
def lowercase__ ( self : Tuple ) -> Any:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not have a get_input_embeddings method" )
def lowercase__ ( self : Tuple ) -> Optional[int]:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former is not a generative model" )
def lowercase__ ( self : str ) -> Dict:
'''simple docstring'''
pass
@unittest.skip(reason="Mask2Former does not use token embeddings" )
def lowercase__ ( self : Union[str, Any] ) -> Union[str, Any]:
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`" )
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def lowercase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
pass
def lowercase__ ( self : Tuple ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
@slow
def lowercase__ ( self : List[Any] ) -> Dict:
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
UpperCAmelCase_ = MaskaFormerModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def lowercase__ ( self : List[str] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = (self.model_tester.min_size,) * 2
UpperCAmelCase_ = {
"pixel_values": torch.randn((2, 3, *size) , device=_UpperCAmelCase ),
"mask_labels": torch.randn((2, 10, *size) , device=_UpperCAmelCase ),
"class_labels": torch.zeros(2 , 10 , device=_UpperCAmelCase ).long(),
}
UpperCAmelCase_ = self.model_tester.get_config()
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCAmelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
def lowercase__ ( self : Union[str, Any] ) -> str:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_UpperCAmelCase , **_UpperCAmelCase , output_hidden_states=_UpperCAmelCase )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
UpperCAmelCase_ = model(**_UpperCAmelCase , output_attentions=_UpperCAmelCase )
self.assertTrue(outputs.attentions is not None )
def lowercase__ ( self : Union[str, Any] ) -> List[str]:
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase_ = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
UpperCAmelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase ).loss
loss.backward()
def lowercase__ ( self : str ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase_ = self.all_model_classes[1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
UpperCAmelCase_ = True
UpperCAmelCase_ = True
UpperCAmelCase_ = model_class(_UpperCAmelCase ).to(_UpperCAmelCase )
model.train()
UpperCAmelCase_ = model(_UpperCAmelCase , mask_labels=_UpperCAmelCase , class_labels=_UpperCAmelCase )
UpperCAmelCase_ = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
UpperCAmelCase_ = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_UpperCAmelCase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
lowerCamelCase = 1e-4
def a__ ( ):
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_vision
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowercase__ ( self : Optional[Any] ) -> int:
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def lowercase__ ( self : Dict ) -> Optional[int]:
'''simple docstring'''
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase__ ( self : Optional[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
UpperCAmelCase_ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
UpperCAmelCase_ = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
UpperCAmelCase_ = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
UpperCAmelCase_ = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_UpperCAmelCase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def lowercase__ ( self : int ) -> List[str]:
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(_UpperCAmelCase , return_tensors="pt" ).to(_UpperCAmelCase )
UpperCAmelCase_ = inputs["pixel_values"].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_UpperCAmelCase , (1, 3, 384, 384) )
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
# masks_queries_logits
UpperCAmelCase_ = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
UpperCAmelCase_ = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
UpperCAmelCase_ = torch.tensor(_UpperCAmelCase ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
# class_queries_logits
UpperCAmelCase_ = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
UpperCAmelCase_ = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _UpperCAmelCase , atol=_UpperCAmelCase ) )
def lowercase__ ( self : List[Any] ) -> Union[str, Any]:
'''simple docstring'''
UpperCAmelCase_ = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_UpperCAmelCase ).eval()
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] , segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] , return_tensors="pt" , )
UpperCAmelCase_ = inputs["pixel_values"].to(_UpperCAmelCase )
UpperCAmelCase_ = [el.to(_UpperCAmelCase ) for el in inputs["mask_labels"]]
UpperCAmelCase_ = [el.to(_UpperCAmelCase ) for el in inputs["class_labels"]]
with torch.no_grad():
UpperCAmelCase_ = model(**_UpperCAmelCase )
self.assertTrue(outputs.loss is not None )
| 241 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__A = logging.get_logger(__name__)
__A = {
'''google/pix2struct-textcaps-base''': (
'''https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json'''
),
}
class _snake_case ( a__ ):
snake_case__ = "pix2struct_text_model"
snake_case__ = ["past_key_values"]
snake_case__ = {
"hidden_size": "hidden_size",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Any , UpperCAmelCase : Dict=50244 , UpperCAmelCase : str=768 , UpperCAmelCase : Union[str, Any]=64 , UpperCAmelCase : str=2048 , UpperCAmelCase : List[Any]=12 , UpperCAmelCase : str=12 , UpperCAmelCase : str=32 , UpperCAmelCase : Any=128 , UpperCAmelCase : Dict=0.1 , UpperCAmelCase : Tuple=1E-6 , UpperCAmelCase : Union[str, Any]=1.0 , UpperCAmelCase : List[str]="gelu_new" , UpperCAmelCase : Any=0 , UpperCAmelCase : Any=False , UpperCAmelCase : Optional[Any]=0 , UpperCAmelCase : Optional[Any]=1 , UpperCAmelCase : Optional[int]=False , UpperCAmelCase : str=True , **UpperCAmelCase : List[str] , ):
__lowerCamelCase : int = vocab_size
__lowerCamelCase : Tuple = hidden_size
__lowerCamelCase : Optional[Any] = d_kv
__lowerCamelCase : Union[str, Any] = d_ff
__lowerCamelCase : Any = num_layers
__lowerCamelCase : Optional[Any] = num_heads
__lowerCamelCase : Any = relative_attention_num_buckets
__lowerCamelCase : str = relative_attention_max_distance
__lowerCamelCase : int = dropout_rate
__lowerCamelCase : List[str] = layer_norm_epsilon
__lowerCamelCase : List[str] = initializer_factor
__lowerCamelCase : Union[str, Any] = use_cache
__lowerCamelCase : int = eos_token_id
__lowerCamelCase : Any = decoder_start_token_id
# for backwards compatibility
__lowerCamelCase : str = dense_act_fn
super().__init__(
pad_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase , decoder_start_token_id=UpperCAmelCase , tie_word_embeddings=UpperCAmelCase , is_decoder=UpperCAmelCase , **UpperCAmelCase , )
@classmethod
def lowerCamelCase__ ( cls : str , UpperCAmelCase : Union[str, os.PathLike] , **UpperCAmelCase : Optional[int] ):
cls._set_token_in_kwargs(UpperCAmelCase )
__lowerCamelCase , __lowerCamelCase : str = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
# get the text config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
__lowerCamelCase : Any = config_dict["text_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase )
class _snake_case ( a__ ):
snake_case__ = "pix2struct_vision_model"
def __init__( self : Union[str, Any] , UpperCAmelCase : Tuple=768 , UpperCAmelCase : Tuple=768 , UpperCAmelCase : List[str]=2048 , UpperCAmelCase : str=64 , UpperCAmelCase : Union[str, Any]=12 , UpperCAmelCase : Any=12 , UpperCAmelCase : str="gelu_new" , UpperCAmelCase : List[str]=1E-6 , UpperCAmelCase : Dict=0.0 , UpperCAmelCase : Union[str, Any]=0.0 , UpperCAmelCase : int=1E-10 , UpperCAmelCase : Optional[int]=1.0 , UpperCAmelCase : Union[str, Any]=4096 , UpperCAmelCase : List[Any]=32 , UpperCAmelCase : str=128 , **UpperCAmelCase : Optional[int] , ):
super().__init__(**UpperCAmelCase )
__lowerCamelCase : Optional[Any] = hidden_size
__lowerCamelCase : Dict = patch_embed_hidden_size
__lowerCamelCase : List[Any] = d_ff
__lowerCamelCase : Optional[Any] = dropout_rate
__lowerCamelCase : Dict = num_hidden_layers
__lowerCamelCase : List[Any] = num_attention_heads
__lowerCamelCase : str = initializer_range
__lowerCamelCase : List[Any] = initializer_factor
__lowerCamelCase : str = attention_dropout
__lowerCamelCase : Optional[int] = layer_norm_eps
__lowerCamelCase : List[Any] = dense_act_fn
__lowerCamelCase : List[str] = seq_len
__lowerCamelCase : List[Any] = relative_attention_num_buckets
__lowerCamelCase : Any = relative_attention_max_distance
__lowerCamelCase : Optional[Any] = d_kv
@classmethod
def lowerCamelCase__ ( cls : Any , UpperCAmelCase : Union[str, os.PathLike] , **UpperCAmelCase : str ):
cls._set_token_in_kwargs(UpperCAmelCase )
__lowerCamelCase , __lowerCamelCase : int = cls.get_config_dict(UpperCAmelCase , **UpperCAmelCase )
# get the vision config dict if we are loading from Pix2StructConfig
if config_dict.get("model_type" ) == "pix2struct":
__lowerCamelCase : Union[str, Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
F"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(UpperCAmelCase , **UpperCAmelCase )
class _snake_case ( a__ ):
snake_case__ = "pix2struct"
snake_case__ = True
def __init__( self : Any , UpperCAmelCase : Tuple=None , UpperCAmelCase : Optional[int]=None , UpperCAmelCase : List[str]=1.0 , UpperCAmelCase : Optional[int]=0.0_2 , UpperCAmelCase : Dict=False , UpperCAmelCase : List[str]=False , UpperCAmelCase : Union[str, Any]=True , **UpperCAmelCase : Optional[int] , ):
super().__init__(tie_word_embeddings=UpperCAmelCase , is_encoder_decoder=UpperCAmelCase , **UpperCAmelCase )
if text_config is None:
__lowerCamelCase : List[Any] = {}
logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values." )
if vision_config is None:
__lowerCamelCase : int = {}
logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values." )
__lowerCamelCase : Dict = PixaStructTextConfig(**UpperCAmelCase )
__lowerCamelCase : List[str] = PixaStructVisionConfig(**UpperCAmelCase )
__lowerCamelCase : Tuple = self.text_config.decoder_start_token_id
__lowerCamelCase : int = self.text_config.pad_token_id
__lowerCamelCase : Dict = self.text_config.eos_token_id
__lowerCamelCase : str = initializer_factor
__lowerCamelCase : Union[str, Any] = initializer_range
__lowerCamelCase : List[Any] = self.initializer_range
__lowerCamelCase : Tuple = self.initializer_range
__lowerCamelCase : Dict = is_vqa
@classmethod
def lowerCamelCase__ ( cls : Dict , UpperCAmelCase : PixaStructTextConfig , UpperCAmelCase : PixaStructVisionConfig , **UpperCAmelCase : Tuple ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **UpperCAmelCase )
def lowerCamelCase__ ( self : str ):
__lowerCamelCase : Dict = copy.deepcopy(self.__dict__ )
__lowerCamelCase : Optional[Any] = self.text_config.to_dict()
__lowerCamelCase : str = self.vision_config.to_dict()
__lowerCamelCase : Optional[Any] = self.__class__.model_type
return output | 135 | """simple docstring"""
import json
import os
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from requests.exceptions import HTTPError
from transformers.utils import (
CONFIG_NAME,
FLAX_WEIGHTS_NAME,
TF2_WEIGHTS_NAME,
TRANSFORMERS_CACHE,
WEIGHTS_NAME,
cached_file,
get_file_from_repo,
has_file,
)
__A = '''hf-internal-testing/tiny-random-bert'''
__A = os.path.join(TRANSFORMERS_CACHE, '''models--hf-internal-testing--tiny-random-bert''')
__A = '''9b8c223d42b2188cb49d29af482996f9d0f3e5a6'''
class _snake_case ( unittest.TestCase ):
def lowerCamelCase__ ( self : Any ):
__lowerCamelCase : Dict = cached_file(UpperCAmelCase , UpperCAmelCase )
# Should have downloaded the file in here
self.assertTrue(os.path.isdir(UpperCAmelCase ) )
# Cache should contain at least those three subfolders:
for subfolder in ["blobs", "refs", "snapshots"]:
self.assertTrue(os.path.isdir(os.path.join(UpperCAmelCase , UpperCAmelCase ) ) )
with open(os.path.join(UpperCAmelCase , "refs" , "main" ) ) as f:
__lowerCamelCase : Dict = f.read()
self.assertEqual(UpperCAmelCase , os.path.join(UpperCAmelCase , "snapshots" , UpperCAmelCase , UpperCAmelCase ) )
self.assertTrue(os.path.isfile(UpperCAmelCase ) )
# File is cached at the same place the second time.
__lowerCamelCase : Tuple = cached_file(UpperCAmelCase , UpperCAmelCase )
self.assertEqual(UpperCAmelCase , UpperCAmelCase )
# Using a specific revision to test the full commit hash.
__lowerCamelCase : List[str] = cached_file(UpperCAmelCase , UpperCAmelCase , revision="9b8c223" )
self.assertEqual(UpperCAmelCase , os.path.join(UpperCAmelCase , "snapshots" , UpperCAmelCase , UpperCAmelCase ) )
def lowerCamelCase__ ( self : List[str] ):
with self.assertRaisesRegex(UpperCAmelCase , "is not a valid model identifier" ):
__lowerCamelCase : Optional[Any] = cached_file("tiny-random-bert" , UpperCAmelCase )
with self.assertRaisesRegex(UpperCAmelCase , "is not a valid git identifier" ):
__lowerCamelCase : Dict = cached_file(UpperCAmelCase , UpperCAmelCase , revision="aaaa" )
with self.assertRaisesRegex(UpperCAmelCase , "does not appear to have a file named" ):
__lowerCamelCase : List[Any] = cached_file(UpperCAmelCase , "conf" )
def lowerCamelCase__ ( self : str ):
with self.assertRaisesRegex(UpperCAmelCase , "does not appear to have a file named" ):
__lowerCamelCase : Any = cached_file(UpperCAmelCase , "conf" )
with open(os.path.join(UpperCAmelCase , "refs" , "main" ) ) as f:
__lowerCamelCase : List[str] = f.read()
self.assertTrue(os.path.isfile(os.path.join(UpperCAmelCase , ".no_exist" , UpperCAmelCase , "conf" ) ) )
__lowerCamelCase : List[str] = cached_file(UpperCAmelCase , "conf" , _raise_exceptions_for_missing_entries=UpperCAmelCase )
self.assertIsNone(UpperCAmelCase )
__lowerCamelCase : Optional[Any] = cached_file(UpperCAmelCase , "conf" , local_files_only=UpperCAmelCase , _raise_exceptions_for_missing_entries=UpperCAmelCase )
self.assertIsNone(UpperCAmelCase )
__lowerCamelCase : str = mock.Mock()
__lowerCamelCase : Union[str, Any] = 500
__lowerCamelCase : Tuple = {}
__lowerCamelCase : Dict = HTTPError
__lowerCamelCase : Any = {}
# Under the mock environment we get a 500 error when trying to reach the tokenizer.
with mock.patch("requests.Session.request" , return_value=UpperCAmelCase ) as mock_head:
__lowerCamelCase : Any = cached_file(UpperCAmelCase , "conf" , _raise_exceptions_for_connection_errors=UpperCAmelCase )
self.assertIsNone(UpperCAmelCase )
# This check we did call the fake head request
mock_head.assert_called()
def lowerCamelCase__ ( self : str ):
self.assertTrue(has_file("hf-internal-testing/tiny-bert-pt-only" , UpperCAmelCase ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , UpperCAmelCase ) )
self.assertFalse(has_file("hf-internal-testing/tiny-bert-pt-only" , UpperCAmelCase ) )
def lowerCamelCase__ ( self : Any ):
# `get_file_from_repo` returns None if the file does not exist
self.assertIsNone(get_file_from_repo("bert-base-cased" , "ahah.txt" ) )
# The function raises if the repository does not exist.
with self.assertRaisesRegex(UpperCAmelCase , "is not a valid model identifier" ):
get_file_from_repo("bert-base-case" , UpperCAmelCase )
# The function raises if the revision does not exist.
with self.assertRaisesRegex(UpperCAmelCase , "is not a valid git identifier" ):
get_file_from_repo("bert-base-cased" , UpperCAmelCase , revision="ahaha" )
__lowerCamelCase : str = get_file_from_repo("bert-base-cased" , UpperCAmelCase )
# The name is the cached name which is not very easy to test, so instead we load the content.
__lowerCamelCase : Tuple = json.loads(open(UpperCAmelCase , "r" ).read() )
self.assertEqual(config["hidden_size"] , 768 )
def lowerCamelCase__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmp_dir:
__lowerCamelCase : Union[str, Any] = Path(UpperCAmelCase ) / "a.txt"
filename.touch()
self.assertEqual(get_file_from_repo(UpperCAmelCase , "a.txt" ) , str(UpperCAmelCase ) )
self.assertIsNone(get_file_from_repo(UpperCAmelCase , "b.txt" ) ) | 135 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE :str = BlipImageProcessor()
__SCREAMING_SNAKE_CASE :Union[str, Any] = GPTaTokenizer.from_pretrained('''hf-internal-testing/tiny-random-GPT2Model''' )
__SCREAMING_SNAKE_CASE :Optional[int] = BertTokenizerFast.from_pretrained('''hf-internal-testing/tiny-random-bert''' )
__SCREAMING_SNAKE_CASE :str = InstructBlipProcessor(_lowerCAmelCase ,_lowerCAmelCase ,_lowerCAmelCase )
processor.save_pretrained(self.tmpdirname )
def _UpperCamelCase ( self ,**SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).tokenizer
def _UpperCamelCase ( self ,**SCREAMING_SNAKE_CASE__ ) -> List[str]:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).image_processor
def _UpperCamelCase ( self ,**SCREAMING_SNAKE_CASE__ ) -> Tuple:
"""simple docstring"""
return AutoProcessor.from_pretrained(self.tmpdirname ,**_lowerCAmelCase ).qformer_tokenizer
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
shutil.rmtree(self.tmpdirname )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[Any] = [np.random.randint(2_55 ,size=(3, 30, 4_00) ,dtype=np.uinta )]
__SCREAMING_SNAKE_CASE :Union[str, Any] = [Image.fromarray(np.moveaxis(_lowerCAmelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :int = InstructBlipProcessor(
tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ,qformer_tokenizer=self.get_qformer_tokenizer() ,)
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE :List[str] = self.get_tokenizer(bos_token='''(BOS)''' ,eos_token='''(EOS)''' )
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.get_image_processor(do_normalize=_lowerCAmelCase ,padding_value=1.0 )
__SCREAMING_SNAKE_CASE :List[str] = InstructBlipProcessor.from_pretrained(
self.tmpdirname ,bos_token='''(BOS)''' ,eos_token='''(EOS)''' ,do_normalize=_lowerCAmelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_lowerCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_lowerCAmelCase )
self.assertIsInstance(processor.qformer_tokenizer ,_lowerCAmelCase )
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE :Dict = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE :str = InstructBlipProcessor(
tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ,qformer_tokenizer=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE :Any = image_processor(_lowerCAmelCase ,return_tensors='''np''' )
__SCREAMING_SNAKE_CASE :int = processor(images=_lowerCAmelCase ,return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = self.get_image_processor()
__SCREAMING_SNAKE_CASE :Tuple = self.get_tokenizer()
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE :Optional[int] = InstructBlipProcessor(
tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ,qformer_tokenizer=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE :Any = """lower newer"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = processor(text=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE :List[str] = tokenizer(_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE :List[Any] = qformer_tokenizer(_lowerCAmelCase ,return_token_type_ids=_lowerCAmelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] ,encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] ,encoded_processor['''qformer_''' + key] )
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Dict = self.get_image_processor()
__SCREAMING_SNAKE_CASE :Tuple = self.get_tokenizer()
__SCREAMING_SNAKE_CASE :Optional[int] = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE :Optional[int] = InstructBlipProcessor(
tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ,qformer_tokenizer=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE :Union[str, Any] = """lower newer"""
__SCREAMING_SNAKE_CASE :List[Any] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE :Optional[int] = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase )
self.assertListEqual(
list(inputs.keys() ) ,['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] ,)
# test if it raises when no input is passed
with pytest.raises(_lowerCAmelCase ):
processor()
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = self.get_image_processor()
__SCREAMING_SNAKE_CASE :Any = self.get_tokenizer()
__SCREAMING_SNAKE_CASE :List[str] = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE :int = InstructBlipProcessor(
tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ,qformer_tokenizer=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE :Union[str, Any] = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE :Union[str, Any] = processor.batch_decode(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE :List[Any] = tokenizer.batch_decode(_lowerCAmelCase )
self.assertListEqual(_lowerCAmelCase ,_lowerCAmelCase )
def _UpperCamelCase ( self ) -> Optional[int]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = self.get_image_processor()
__SCREAMING_SNAKE_CASE :List[Any] = self.get_tokenizer()
__SCREAMING_SNAKE_CASE :List[str] = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE :str = InstructBlipProcessor(
tokenizer=_lowerCAmelCase ,image_processor=_lowerCAmelCase ,qformer_tokenizer=_lowerCAmelCase )
__SCREAMING_SNAKE_CASE :Union[str, Any] = """lower newer"""
__SCREAMING_SNAKE_CASE :List[str] = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE :List[Any] = processor(text=_lowerCAmelCase ,images=_lowerCAmelCase )
self.assertListEqual(
list(inputs.keys() ) ,['''input_ids''', '''attention_mask''', '''qformer_input_ids''', '''qformer_attention_mask''', '''pixel_values'''] ,) | 370 |
"""simple docstring"""
def __lowerCamelCase ( a_ : int = 10 , a_ : int = 22 ) -> int:
__SCREAMING_SNAKE_CASE :Optional[int] = range(1 , a_ )
__SCREAMING_SNAKE_CASE :List[Any] = range(1 , a_ )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'{solution(1_0, 2_2) = }') | 239 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def __lowerCAmelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
# Initialise PyTorch model
__lowerCamelCase = LxmertConfig.from_json_file(UpperCamelCase__ )
print(f"""Building PyTorch model from configuration: {config}""" )
__lowerCamelCase = LxmertForPreTraining(UpperCamelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# Save pytorch-model
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , UpperCamelCase__ )
if __name__ == "__main__":
__UpperCAmelCase =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__UpperCAmelCase =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 67 | '''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class a__ ( UpperCAmelCase__ ):
lowerCamelCase : Optional[int] =["image_processor", "tokenizer"]
lowerCamelCase : Union[str, Any] ="LayoutLMv2ImageProcessor"
lowerCamelCase : int =("LayoutXLMTokenizer", "LayoutXLMTokenizerFast")
def __init__( self : Optional[int] , a : Any=None , a : Any=None , **a : Union[str, Any] ):
"""simple docstring"""
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , a , )
__lowerCamelCase = kwargs.pop('''feature_extractor''' )
__lowerCamelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(a , a )
def __call__( self : Tuple , a : Optional[int] , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , a : Union[List[List[int]], List[List[List[int]]]] = None , a : Optional[Union[List[int], List[List[int]]]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : Tuple , ):
"""simple docstring"""
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'''You cannot provide bounding boxes '''
'''if you initialized the image processor with apply_ocr set to True.''' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'''You cannot provide word labels if you initialized the image processor with apply_ocr set to True.''' )
if return_overflowing_tokens is True and return_offsets_mapping is False:
raise ValueError('''You cannot return overflowing tokens without returning the offsets mapping.''' )
# first, apply the image processor
__lowerCamelCase = self.image_processor(images=a , return_tensors=a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(a , a ):
__lowerCamelCase = [text] # add batch dimension (as the image processor always adds a batch dimension)
__lowerCamelCase = features['''words''']
__lowerCamelCase = self.tokenizer(
text=text if text is not None else features['''words'''] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['''boxes'''] , word_labels=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , )
# add pixel values
__lowerCamelCase = features.pop('''pixel_values''' )
if return_overflowing_tokens is True:
__lowerCamelCase = self.get_overflowing_images(a , encoded_inputs['''overflow_to_sample_mapping'''] )
__lowerCamelCase = images
return encoded_inputs
def SCREAMING_SNAKE_CASE__ ( self : Tuple , a : Optional[Any] , a : str ):
"""simple docstring"""
__lowerCamelCase = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(a ) != len(a ):
raise ValueError(
'''Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'''
f""" {len(a )} and {len(a )}""" )
return images_with_overflow
def SCREAMING_SNAKE_CASE__ ( self : List[str] , *a : Optional[Any] , **a : Union[str, Any] ):
"""simple docstring"""
return self.tokenizer.batch_decode(*a , **a )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , *a : Union[str, Any] , **a : Tuple ):
"""simple docstring"""
return self.tokenizer.decode(*a , **a )
@property
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
"""simple docstring"""
return ["input_ids", "bbox", "attention_mask", "image"]
@property
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , a , )
return self.image_processor_class
@property
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , a , )
return self.image_processor
| 67 | 1 |
'''simple docstring'''
from __future__ import annotations
import numpy as np
from numpy import floataa
from numpy.typing import NDArray
def _a ( _lowercase : NDArray[floataa] , _lowercase : NDArray[floataa] , _lowercase : list[int] , _lowercase : int , ):
'''simple docstring'''
__UpperCAmelCase : int = coefficient_matrix.shape
__UpperCAmelCase : Union[str, Any] = constant_matrix.shape
if rowsa != colsa:
__UpperCAmelCase : List[Any] = F'Coefficient matrix dimensions must be nxn but received {rowsa}x{colsa}'
raise ValueError(_lowercase )
if colsa != 1:
__UpperCAmelCase : Optional[Any] = F'Constant matrix must be nx1 but received {rowsa}x{colsa}'
raise ValueError(_lowercase )
if rowsa != rowsa:
__UpperCAmelCase : List[str] = (
'''Coefficient and constant matrices dimensions must be nxn and nx1 but '''
F'received {rowsa}x{colsa} and {rowsa}x{colsa}'
)
raise ValueError(_lowercase )
if len(_lowercase ) != rowsa:
__UpperCAmelCase : List[str] = (
'''Number of initial values must be equal to number of rows in coefficient '''
F'matrix but received {len(_lowercase )} and {rowsa}'
)
raise ValueError(_lowercase )
if iterations <= 0:
raise ValueError('''Iterations must be at least 1''' )
__UpperCAmelCase : NDArray[floataa] = np.concatenate(
(coefficient_matrix, constant_matrix) , axis=1 )
__UpperCAmelCase : Dict = table.shape
strictly_diagonally_dominant(_lowercase )
# Iterates the whole matrix for given number of times
for _ in range(_lowercase ):
__UpperCAmelCase : Union[str, Any] = []
for row in range(_lowercase ):
__UpperCAmelCase : int = 0
for col in range(_lowercase ):
if col == row:
__UpperCAmelCase : Optional[Any] = table[row][col]
elif col == cols - 1:
__UpperCAmelCase : Optional[Any] = table[row][col]
else:
temp += (-1) * table[row][col] * init_val[col]
__UpperCAmelCase : Optional[Any] = (temp + val) / denom
new_val.append(_lowercase )
__UpperCAmelCase : List[str] = new_val
return [float(_lowercase ) for i in new_val]
def _a ( _lowercase : NDArray[floataa] ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = table.shape
__UpperCAmelCase : Optional[Any] = True
for i in range(0 , _lowercase ):
__UpperCAmelCase : Tuple = 0
for j in range(0 , cols - 1 ):
if i == j:
continue
else:
total += table[i][j]
if table[i][i] <= total:
raise ValueError('''Coefficient matrix is not strictly diagonally dominant''' )
return is_diagonally_dominant
# Test Cases
if __name__ == "__main__":
import doctest
doctest.testmod()
| 351 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
__UpperCAmelCase :Dict = logging.get_logger(__name__)
__UpperCAmelCase :str = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
__UpperCAmelCase :Optional[int] = {
"vocab_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/vocab.txt",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/vocab.txt",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt"
),
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt"
),
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt",
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json",
"bert-base-multilingual-uncased": (
"https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json"
),
"bert-base-multilingual-cased": (
"https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json"
),
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json"
),
"bert-base-cased-finetuned-mrpc": (
"https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-cased": (
"https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json"
),
"bert-base-german-dbmdz-uncased": (
"https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json"
),
"wietsedv/bert-base-dutch-cased": (
"https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json"
),
},
}
__UpperCAmelCase :Union[str, Any] = {
"bert-base-uncased": 5_1_2,
"bert-large-uncased": 5_1_2,
"bert-base-cased": 5_1_2,
"bert-large-cased": 5_1_2,
"bert-base-multilingual-uncased": 5_1_2,
"bert-base-multilingual-cased": 5_1_2,
"bert-base-chinese": 5_1_2,
"bert-base-german-cased": 5_1_2,
"bert-large-uncased-whole-word-masking": 5_1_2,
"bert-large-cased-whole-word-masking": 5_1_2,
"bert-large-uncased-whole-word-masking-finetuned-squad": 5_1_2,
"bert-large-cased-whole-word-masking-finetuned-squad": 5_1_2,
"bert-base-cased-finetuned-mrpc": 5_1_2,
"bert-base-german-dbmdz-cased": 5_1_2,
"bert-base-german-dbmdz-uncased": 5_1_2,
"TurkuNLP/bert-base-finnish-cased-v1": 5_1_2,
"TurkuNLP/bert-base-finnish-uncased-v1": 5_1_2,
"wietsedv/bert-base-dutch-cased": 5_1_2,
}
__UpperCAmelCase :str = {
"bert-base-uncased": {"do_lower_case": True},
"bert-large-uncased": {"do_lower_case": True},
"bert-base-cased": {"do_lower_case": False},
"bert-large-cased": {"do_lower_case": False},
"bert-base-multilingual-uncased": {"do_lower_case": True},
"bert-base-multilingual-cased": {"do_lower_case": False},
"bert-base-chinese": {"do_lower_case": False},
"bert-base-german-cased": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking": {"do_lower_case": True},
"bert-large-cased-whole-word-masking": {"do_lower_case": False},
"bert-large-uncased-whole-word-masking-finetuned-squad": {"do_lower_case": True},
"bert-large-cased-whole-word-masking-finetuned-squad": {"do_lower_case": False},
"bert-base-cased-finetuned-mrpc": {"do_lower_case": False},
"bert-base-german-dbmdz-cased": {"do_lower_case": False},
"bert-base-german-dbmdz-uncased": {"do_lower_case": True},
"TurkuNLP/bert-base-finnish-cased-v1": {"do_lower_case": False},
"TurkuNLP/bert-base-finnish-uncased-v1": {"do_lower_case": True},
"wietsedv/bert-base-dutch-cased": {"do_lower_case": False},
}
class a ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Optional[int] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE : List[Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE : Tuple = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE : Optional[Any] = BertTokenizer
def __init__( self : Optional[int] , snake_case : Union[str, Any]=None , snake_case : str=None , snake_case : Any=True , snake_case : Tuple="[UNK]" , snake_case : int="[SEP]" , snake_case : Optional[Any]="[PAD]" , snake_case : int="[CLS]" , snake_case : Optional[Any]="[MASK]" , snake_case : Union[str, Any]=True , snake_case : List[Any]=None , **snake_case : int , ) -> str:
super().__init__(
snake_case , tokenizer_file=snake_case , do_lower_case=snake_case , unk_token=snake_case , sep_token=snake_case , pad_token=snake_case , cls_token=snake_case , mask_token=snake_case , tokenize_chinese_chars=snake_case , strip_accents=snake_case , **snake_case , )
__UpperCAmelCase : List[str] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , snake_case ) != do_lower_case
or normalizer_state.get('''strip_accents''' , snake_case ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , snake_case ) != tokenize_chinese_chars
):
__UpperCAmelCase : List[Any] = getattr(snake_case , normalizer_state.pop('''type''' ) )
__UpperCAmelCase : List[Any] = do_lower_case
__UpperCAmelCase : List[Any] = strip_accents
__UpperCAmelCase : str = tokenize_chinese_chars
__UpperCAmelCase : List[str] = normalizer_class(**snake_case )
__UpperCAmelCase : Optional[int] = do_lower_case
def lowerCamelCase__ ( self : List[Any] , snake_case : Tuple , snake_case : List[str]=None ) -> str:
__UpperCAmelCase : Optional[int] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowerCamelCase__ ( self : str , snake_case : List[int] , snake_case : Optional[List[int]] = None ) -> List[int]:
__UpperCAmelCase : List[str] = [self.sep_token_id]
__UpperCAmelCase : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowerCamelCase__ ( self : List[Any] , snake_case : str , snake_case : Optional[str] = None ) -> Tuple[str]:
__UpperCAmelCase : Optional[int] = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case ) | 240 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("""TEST_SAGEMAKER""" , """False""" ) ) is not True , reason="""Skipping test because should only be run when releasing minor transformers version""" , )
@pytest.mark.usefixtures("""sm_env""" )
@parameterized_class(
[
{
"""framework""": """pytorch""",
"""script""": """run_glue_model_parallelism.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
{
"""framework""": """pytorch""",
"""script""": """run_glue.py""",
"""model_name_or_path""": """roberta-large""",
"""instance_type""": """ml.p3dn.24xlarge""",
"""results""": {"""train_runtime""": 1_6_0_0, """eval_accuracy""": 0.3, """eval_loss""": 1.2},
},
] )
class UpperCamelCase__ (unittest.TestCase ):
'''simple docstring'''
def _lowercase ( self ) -> List[str]:
if self.framework == "pytorch":
subprocess.run(
F'''cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py'''.split() , encoding="utf-8" , check=UpperCamelCase__ , )
assert hasattr(self , "env" )
def _lowercase ( self , UpperCamelCase__ ) -> Dict:
# configuration for running training on smdistributed Model Parallel
lowerCamelCase : Any = {
"enabled": True,
"processes_per_host": 8,
}
lowerCamelCase : Union[str, Any] = {
"enabled": True,
"parameters": {
"microbatches": 4,
"placement_strategy": "spread",
"pipeline": "interleaved",
"optimize": "speed",
"partitions": 4,
"ddp": True,
},
}
lowerCamelCase : List[Any] = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
lowerCamelCase : Tuple = "trainer" if self.script == "run_glue.py" else "smtrainer"
# creates estimator
return HuggingFace(
entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F'''{self.env.base_job_name}-{instance_count}-smp-{name_extension}''' , instance_count=UpperCamelCase__ , instance_type=self.instance_type , debugger_hook_config=UpperCamelCase__ , hyperparameters={
**self.env.hyperparameters,
"model_name_or_path": self.model_name_or_path,
"max_steps": 500,
} , metric_definitions=self.env.metric_definitions , distribution=UpperCamelCase__ , py_version="py36" , )
def _lowercase ( self , UpperCamelCase__ ) -> Optional[Any]:
TrainingJobAnalytics(UpperCamelCase__ ).export_csv(F'''{self.env.test_path}/{job_name}_metrics.csv''' )
@parameterized.expand([(1,)] )
def _lowercase ( self , UpperCamelCase__ ) -> Tuple:
# create estimator
lowerCamelCase : Optional[Any] = self.create_estimator(UpperCamelCase__ )
# run training
estimator.fit()
# result dataframe
lowerCamelCase : Any = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
# extract kpis
lowerCamelCase : List[Any] = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"] )
lowerCamelCase : Any = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"] )
# get train time from SageMaker job, this includes starting, preprocessing, stopping
lowerCamelCase : Any = (
Session().describe_training_job(estimator.latest_training_job.name ).get("TrainingTimeInSeconds" , 99_9999 )
)
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy )
assert all(t <= self.results["eval_loss"] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F'''{estimator.latest_training_job.name}.json''' , "w" ) as outfile:
json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss} , UpperCamelCase__ )
| 48 |
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
A_ : List[Any] = current_set.copy()
for row_index, row in enumerate(SCREAMING_SNAKE_CASE ):
A_ : List[str] = row[0]
for column_index, column in enumerate(SCREAMING_SNAKE_CASE ):
if magnitude == 0:
A_ : Union[str, Any] = column
continue
A_ : Dict = column / magnitude
# Subtract to cancel term
A_ : Union[str, Any] = current_set[0]
A_ : Tuple = [first_row]
A_ : int = current_set[1::]
for row in current_set:
A_ : Tuple = []
# If first term is 0, it is already in form we want, so we preserve it
if row[0] == 0:
final_set.append(SCREAMING_SNAKE_CASE )
continue
for column_index in range(len(SCREAMING_SNAKE_CASE ) ):
temp_row.append(first_row[column_index] - row[column_index] )
final_set.append(SCREAMING_SNAKE_CASE )
# Create next recursion iteration set
if len(final_set[0] ) != 3:
A_ : Optional[Any] = final_set[0]
A_ : Any = []
A_ : str = []
for row in final_set[1::]:
current_first_column.append(row[0] )
next_iteration.append(row[1::] )
A_ : Optional[Any] = simplify(SCREAMING_SNAKE_CASE )
for i in range(len(SCREAMING_SNAKE_CASE ) ):
resultant[i].insert(0 , current_first_column[i] )
resultant.insert(0 , SCREAMING_SNAKE_CASE )
A_ : List[Any] = resultant
return final_set
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
if len(SCREAMING_SNAKE_CASE ) == 0:
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
A_ : str = len(SCREAMING_SNAKE_CASE ) + 1
if any(len(SCREAMING_SNAKE_CASE ) != _length for item in equations ):
raise IndexError('''solve_simultaneous() requires n lists of length n+1''' )
for row in equations:
if any(not isinstance(SCREAMING_SNAKE_CASE , (int, float) ) for column in row ):
raise ValueError('''solve_simultaneous() requires lists of integers''' )
if len(SCREAMING_SNAKE_CASE ) == 1:
return [equations[0][-1] / equations[0][0]]
A_ : Dict = equations.copy()
if any(0 in row for row in data_set ):
A_ : Tuple = data_set.copy()
A_ : Optional[Any] = []
for row_index, row in enumerate(SCREAMING_SNAKE_CASE ):
if 0 not in row:
A_ : str = data_set.pop(SCREAMING_SNAKE_CASE )
break
if not full_row:
raise ValueError('''solve_simultaneous() requires at least 1 full equation''' )
data_set.insert(0 , SCREAMING_SNAKE_CASE )
A_ : int = data_set.copy()
A_ : Dict = simplify(SCREAMING_SNAKE_CASE )
A_ : Dict = simplified[::-1]
A_ : list = []
for row in simplified:
A_ : Union[str, Any] = row[-1]
if not solutions:
if row[-2] == 0:
solutions.append(0 )
continue
solutions.append(current_solution / row[-2] )
continue
A_ : Optional[Any] = row.copy()[: len(SCREAMING_SNAKE_CASE ) - 1 :]
while temp_row[0] == 0:
temp_row.pop(0 )
if len(SCREAMING_SNAKE_CASE ) == 0:
solutions.append(0 )
continue
A_ : int = temp_row[1::]
A_ : int = temp_row[::-1]
for column_index, column in enumerate(SCREAMING_SNAKE_CASE ):
current_solution -= column * solutions[column_index]
solutions.append(SCREAMING_SNAKE_CASE )
A_ : Union[str, Any] = []
for item in solutions:
final.append(float(round(SCREAMING_SNAKE_CASE , 5 ) ) )
return final[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCamelCase = [
[2, 1, 1, 1, 1, 4],
[1, 2, 1, 1, 1, 5],
[1, 1, 2, 1, 1, 6],
[1, 1, 1, 2, 1, 7],
[1, 1, 1, 1, 2, 8],
]
print(solve_simultaneous(eq))
print(solve_simultaneous([[4, 2]]))
| 186 | 0 |
'''simple docstring'''
import math
import random
def a ( lowerCamelCase__ , lowerCamelCase__ = False ):
'''simple docstring'''
if deriv:
return value * (1 - value)
return 1 / (1 + math.exp(-value ))
# Initial Value
lowerCamelCase :List[str] = 0.02
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
A_ : List[str] = float(2 * (random.randint(1 , 1_00 )) - 1 )
for _ in range(lowerCamelCase__ ):
# Forward propagation
A_ : List[str] = sigmoid_function(INITIAL_VALUE * weight )
# How much did we miss?
A_ : Optional[int] = (expected / 1_00) - layer_a
# Error delta
A_ : Tuple = layer_1_error * sigmoid_function(lowerCamelCase__ , lowerCamelCase__ )
# Update weight
weight += INITIAL_VALUE * layer_1_delta
return layer_a * 1_00
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase :List[str] = int(input('''Expected value: '''))
lowerCamelCase :Optional[Any] = int(input('''Number of propagations: '''))
print(forward_propagation(expected, number_propagations)) | 135 |
'''simple docstring'''
def a ( lowerCamelCase__ , lowerCamelCase__ ):
'''simple docstring'''
if density <= 0:
raise ValueError("""Impossible fluid density""" )
if bulk_modulus <= 0:
raise ValueError("""Impossible bulk modulus""" )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod() | 135 | 1 |
import unittest
from transformers import LiltConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
)
from transformers.models.lilt.modeling_lilt import LILT_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , _lowerCAmelCase : List[Any] , _lowerCAmelCase : Any=1_3 , _lowerCAmelCase : Union[str, Any]=7 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Optional[int]=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : Any=True , _lowerCAmelCase : Union[str, Any]=9_9 , _lowerCAmelCase : List[str]=2_4 , _lowerCAmelCase : Any=2 , _lowerCAmelCase : Dict=6 , _lowerCAmelCase : Tuple=3_7 , _lowerCAmelCase : Any="gelu" , _lowerCAmelCase : Union[str, Any]=0.1 , _lowerCAmelCase : Tuple=0.1 , _lowerCAmelCase : int=5_1_2 , _lowerCAmelCase : Optional[Any]=1_6 , _lowerCAmelCase : List[Any]=2 , _lowerCAmelCase : Any=0.02 , _lowerCAmelCase : List[str]=3 , _lowerCAmelCase : Optional[Any]=None , _lowerCAmelCase : Any=1_0_0_0 , ) -> List[Any]:
"""simple docstring"""
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_input_mask
snake_case_ = use_token_type_ids
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_act
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = type_vocab_size
snake_case_ = type_sequence_label_size
snake_case_ = initializer_range
snake_case_ = num_labels
snake_case_ = scope
snake_case_ = range_bbox
def lowerCAmelCase__ ( self : int ) -> Optional[int]:
"""simple docstring"""
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length, 4] , self.range_bbox )
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
snake_case_ = bbox[i, j, 3]
snake_case_ = bbox[i, j, 1]
snake_case_ = t
if bbox[i, j, 2] < bbox[i, j, 0]:
snake_case_ = bbox[i, j, 2]
snake_case_ = bbox[i, j, 0]
snake_case_ = t
snake_case_ = None
if self.use_input_mask:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
snake_case_ = None
if self.use_token_type_ids:
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case_ = None
snake_case_ = None
if self.use_labels:
snake_case_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case_ = self.get_config()
return config, input_ids, bbox, token_type_ids, input_mask, sequence_labels, token_labels
def lowerCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
return LiltConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : Union[str, Any] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : List[str] , _lowerCAmelCase : List[str] , _lowerCAmelCase : int , ) -> str:
"""simple docstring"""
snake_case_ = LiltModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
snake_case_ = model(_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
snake_case_ = model(_lowerCAmelCase , bbox=_lowerCAmelCase , token_type_ids=_lowerCAmelCase )
snake_case_ = model(_lowerCAmelCase , bbox=_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def lowerCAmelCase__ ( self : Optional[int] , _lowerCAmelCase : int , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : str , _lowerCAmelCase : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : Dict , ) -> Any:
"""simple docstring"""
snake_case_ = self.num_labels
snake_case_ = LiltForTokenClassification(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
snake_case_ = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , labels=_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase__ ( self : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : str , _lowerCAmelCase : Dict , _lowerCAmelCase : List[str] , _lowerCAmelCase : Optional[int] , _lowerCAmelCase : str , _lowerCAmelCase : Optional[Any] , ) -> Optional[int]:
"""simple docstring"""
snake_case_ = LiltForQuestionAnswering(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
snake_case_ = model(
_lowerCAmelCase , bbox=_lowerCAmelCase , attention_mask=_lowerCAmelCase , token_type_ids=_lowerCAmelCase , start_positions=_lowerCAmelCase , end_positions=_lowerCAmelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
snake_case_ = self.prepare_config_and_inputs()
(
(
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) , (
snake_case_
) ,
) = config_and_inputs
snake_case_ = {
"input_ids": input_ids,
"bbox": bbox,
"token_type_ids": token_type_ids,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( a , a , a , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(
LiltModel,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltForQuestionAnswering,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{
'feature-extraction': LiltModel,
'question-answering': LiltForQuestionAnswering,
'text-classification': LiltForSequenceClassification,
'token-classification': LiltForTokenClassification,
'zero-shot': LiltForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = False
_SCREAMING_SNAKE_CASE = False
def lowerCAmelCase__ ( self : Any , _lowerCAmelCase : Optional[Any] , _lowerCAmelCase : str , _lowerCAmelCase : int , _lowerCAmelCase : int , _lowerCAmelCase : Tuple ) -> List[str]:
"""simple docstring"""
return True
def lowerCAmelCase__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
snake_case_ = LiltModelTester(self )
snake_case_ = ConfigTester(self , config_class=_lowerCAmelCase , hidden_size=3_7 )
def lowerCAmelCase__ ( self : str ) -> Optional[Any]:
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self : Tuple ) -> Dict:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self : Optional[Any] ) -> Any:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case_ = type
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self : str ) -> Union[str, Any]:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_lowerCAmelCase )
def lowerCAmelCase__ ( self : Any ) -> Tuple:
"""simple docstring"""
snake_case_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
for model_name in LILT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case_ = LiltModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
@require_torch
@slow
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self : Union[str, Any] ) -> Optional[Any]:
"""simple docstring"""
snake_case_ = LiltModel.from_pretrained("SCUT-DLVCLab/lilt-roberta-en-base" ).to(_lowerCAmelCase )
snake_case_ = torch.tensor([[1, 2]] , device=_lowerCAmelCase )
snake_case_ = torch.tensor([[[1, 2, 3, 4], [5, 6, 7, 8]]] , device=_lowerCAmelCase )
# forward pass
with torch.no_grad():
snake_case_ = model(input_ids=_lowerCAmelCase , bbox=_lowerCAmelCase )
snake_case_ = torch.Size([1, 2, 7_6_8] )
snake_case_ = torch.tensor(
[[-0.0_653, 0.0_950, -0.0_061], [-0.0_545, 0.0_926, -0.0_324]] , device=_lowerCAmelCase , )
self.assertTrue(outputs.last_hidden_state.shape , _lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :, :3] , _lowerCAmelCase , atol=1e-3 ) )
| 159 |
def _lowerCAmelCase ( lowerCAmelCase_ :int | float | str )->tuple[int, int]:
'''simple docstring'''
try:
snake_case_ = float(lowerCAmelCase_ )
except ValueError:
raise ValueError("Please enter a valid number" )
snake_case_ = decimal - int(lowerCAmelCase_ )
if fractional_part == 0:
return int(lowerCAmelCase_ ), 1
else:
snake_case_ = len(str(lowerCAmelCase_ ).split("." )[1] )
snake_case_ = int(decimal * (10**number_of_frac_digits) )
snake_case_ = 10**number_of_frac_digits
snake_case_ , snake_case_ = denominator, numerator
while True:
snake_case_ = dividend % divisor
if remainder == 0:
break
snake_case_ , snake_case_ = divisor, remainder
snake_case_ , snake_case_ = numerator / divisor, denominator / divisor
return int(lowerCAmelCase_ ), int(lowerCAmelCase_ )
if __name__ == "__main__":
print(F'''{decimal_to_fraction(2) = }''')
print(F'''{decimal_to_fraction(89.0) = }''')
print(F'''{decimal_to_fraction('67') = }''')
print(F'''{decimal_to_fraction('45.0') = }''')
print(F'''{decimal_to_fraction(1.5) = }''')
print(F'''{decimal_to_fraction('6.25') = }''')
print(F'''{decimal_to_fraction('78td') = }''')
| 159 | 1 |
"""simple docstring"""
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class SCREAMING_SNAKE_CASE__ ( lowerCamelCase_ ):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE , ) -> Optional[int]:
'''simple docstring'''
super().__init__(
features=lowerCAmelCase__ , cache_dir=lowerCAmelCase__ , keep_in_memory=lowerCAmelCase__ , streaming=lowerCAmelCase__ , num_proc=lowerCAmelCase__ , **lowerCAmelCase__ , )
UpperCAmelCase : List[Any] = Generator(
cache_dir=lowerCAmelCase__ , features=lowerCAmelCase__ , generator=lowerCAmelCase__ , gen_kwargs=lowerCAmelCase__ , **lowerCAmelCase__ , )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
if self.streaming:
UpperCAmelCase : Optional[int] = self.builder.as_streaming_dataset(split="""train""" )
# Build regular (map-style) dataset
else:
UpperCAmelCase : List[str] = None
UpperCAmelCase : Any = None
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : Union[str, Any] = None
self.builder.download_and_prepare(
download_config=lowerCAmelCase__ , download_mode=lowerCAmelCase__ , verification_mode=lowerCAmelCase__ , base_path=lowerCAmelCase__ , num_proc=self.num_proc , )
UpperCAmelCase : int = self.builder.as_dataset(
split="""train""" , verification_mode=lowerCAmelCase__ , in_memory=self.keep_in_memory )
return dataset
| 366 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
A: int = logging.get_logger(__name__)
A: Any = {"vocab_file": "vocab.txt"}
A: Optional[int] = {
"vocab_file": {
"YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
"YituTech/conv-bert-medium-small": (
"https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
),
"YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
}
}
A: Optional[int] = {
"YituTech/conv-bert-base": 5_1_2,
"YituTech/conv-bert-medium-small": 5_1_2,
"YituTech/conv-bert-small": 5_1_2,
}
A: int = {
"YituTech/conv-bert-base": {"do_lower_case": True},
"YituTech/conv-bert-medium-small": {"do_lower_case": True},
"YituTech/conv-bert-small": {"do_lower_case": True},
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : List[Any] = VOCAB_FILES_NAMES
__lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[str] = PRETRAINED_INIT_CONFIGURATION
__lowerCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : int = ConvBertTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="[UNK]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="[PAD]" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ) -> Union[str, Any]:
'''simple docstring'''
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , tokenize_chinese_chars=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
UpperCAmelCase : int = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" , _SCREAMING_SNAKE_CASE ) != do_lower_case
or normalizer_state.get("""strip_accents""" , _SCREAMING_SNAKE_CASE ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" , _SCREAMING_SNAKE_CASE ) != tokenize_chinese_chars
):
UpperCAmelCase : Dict = getattr(_SCREAMING_SNAKE_CASE , normalizer_state.pop("""type""" ) )
UpperCAmelCase : str = do_lower_case
UpperCAmelCase : Optional[int] = strip_accents
UpperCAmelCase : List[str] = tokenize_chinese_chars
UpperCAmelCase : Dict = normalizer_class(**_SCREAMING_SNAKE_CASE )
UpperCAmelCase : int = do_lower_case
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None ) -> Tuple:
'''simple docstring'''
UpperCAmelCase : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : str = [self.sep_token_id]
UpperCAmelCase : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
UpperCAmelCase : Dict = self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
| 76 | 0 |
def _SCREAMING_SNAKE_CASE ( lowercase : Any ):
'''simple docstring'''
lowerCamelCase_ = [0] * len(lowercase )
lowerCamelCase_ = []
lowerCamelCase_ = [1] * len(lowercase )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(lowercase ) ):
if indegree[i] == 0:
queue.append(lowercase )
while queue:
lowerCamelCase_ = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
lowerCamelCase_ = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(lowercase )
print(max(lowercase ) )
# Adjacency list of Graph
lowerCamelCase : Tuple = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 204 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
lowerCamelCase : List[Any] = None
lowerCamelCase : int = logging.get_logger(__name__)
lowerCamelCase : Optional[int] = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
lowerCamelCase : Optional[int] = {
"vocab_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/spiece.model",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/spiece.model",
},
"tokenizer_file": {
"google/fnet-base": "https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json",
"google/fnet-large": "https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json",
},
}
lowerCamelCase : List[Any] = {
"google/fnet-base": 512,
"google/fnet-large": 512,
}
lowerCamelCase : Any = "▁"
class A( UpperCamelCase ):
'''simple docstring'''
UpperCamelCase = VOCAB_FILES_NAMES
UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase = ['''input_ids''', '''token_type_ids''']
UpperCamelCase = FNetTokenizer
def __init__( self : Optional[int] , A_ : Any=None , A_ : int=None , A_ : int=False , A_ : Optional[int]=True , A_ : List[Any]=True , A_ : Tuple="<unk>" , A_ : Optional[int]="[SEP]" , A_ : List[Any]="<pad>" , A_ : Optional[int]="[CLS]" , A_ : Optional[Any]="[MASK]" , **A_ : Dict , ) -> List[str]:
"""simple docstring"""
lowerCamelCase_ = (
AddedToken(A_ , lstrip=A_ , rstrip=A_ , normalized=A_ )
if isinstance(A_ , A_ )
else mask_token
)
super().__init__(
A_ , tokenizer_file=A_ , do_lower_case=A_ , remove_space=A_ , keep_accents=A_ , unk_token=A_ , sep_token=A_ , pad_token=A_ , cls_token=A_ , mask_token=A_ , **A_ , )
lowerCamelCase_ = do_lower_case
lowerCamelCase_ = remove_space
lowerCamelCase_ = keep_accents
lowerCamelCase_ = vocab_file
lowerCamelCase_ = False if not self.vocab_file else True
def a__ ( self : List[Any] , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def a__ ( self : Tuple , A_ : List[int] , A_ : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
lowerCamelCase_ = [self.sep_token_id]
lowerCamelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def a__ ( self : Dict , A_ : str , A_ : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(A_ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
lowerCamelCase_ = os.path.join(
A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ):
copyfile(self.vocab_file , A_ )
return (out_vocab_file,)
| 204 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
lowerCamelCase__ : List[Any] = logging.get_logger(__name__)
lowerCamelCase__ : Dict = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
lowercase_ = "deberta-v2"
def __init__( self : Union[str, Any] , _lowerCAmelCase : Dict=128_100 , _lowerCAmelCase : List[str]=1_536 , _lowerCAmelCase : Optional[int]=24 , _lowerCAmelCase : List[str]=24 , _lowerCAmelCase : str=6_144 , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : str=0.1 , _lowerCAmelCase : List[Any]=0.1 , _lowerCAmelCase : Any=512 , _lowerCAmelCase : Optional[Any]=0 , _lowerCAmelCase : Optional[Any]=0.02 , _lowerCAmelCase : Any=1E-7 , _lowerCAmelCase : List[str]=False , _lowerCAmelCase : int=-1 , _lowerCAmelCase : Tuple=0 , _lowerCAmelCase : Union[str, Any]=True , _lowerCAmelCase : Tuple=None , _lowerCAmelCase : List[str]=0 , _lowerCAmelCase : Optional[Any]="gelu" , **_lowerCAmelCase : int , ):
super().__init__(**_lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = hidden_size
SCREAMING_SNAKE_CASE_ = num_hidden_layers
SCREAMING_SNAKE_CASE_ = num_attention_heads
SCREAMING_SNAKE_CASE_ = intermediate_size
SCREAMING_SNAKE_CASE_ = hidden_act
SCREAMING_SNAKE_CASE_ = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ = max_position_embeddings
SCREAMING_SNAKE_CASE_ = type_vocab_size
SCREAMING_SNAKE_CASE_ = initializer_range
SCREAMING_SNAKE_CASE_ = relative_attention
SCREAMING_SNAKE_CASE_ = max_relative_positions
SCREAMING_SNAKE_CASE_ = pad_token_id
SCREAMING_SNAKE_CASE_ = position_biased_input
# Backwards compatibility
if type(_lowerCAmelCase ) == str:
SCREAMING_SNAKE_CASE_ = [x.strip() for x in pos_att_type.lower().split('|' )]
SCREAMING_SNAKE_CASE_ = pos_att_type
SCREAMING_SNAKE_CASE_ = vocab_size
SCREAMING_SNAKE_CASE_ = layer_norm_eps
SCREAMING_SNAKE_CASE_ = kwargs.get('pooler_hidden_size' , _lowerCAmelCase )
SCREAMING_SNAKE_CASE_ = pooler_dropout
SCREAMING_SNAKE_CASE_ = pooler_hidden_act
class lowerCamelCase_ ( _SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def lowerCAmelCase_ ( self : Any ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
SCREAMING_SNAKE_CASE_ = {0: 'batch', 1: 'sequence'}
if self._config.type_vocab_size > 0:
return OrderedDict(
[('input_ids', dynamic_axis), ('attention_mask', dynamic_axis), ('token_type_ids', dynamic_axis)] )
else:
return OrderedDict([('input_ids', dynamic_axis), ('attention_mask', dynamic_axis)] )
@property
def lowerCAmelCase_ ( self : List[Any] ):
return 12
def lowerCAmelCase_ ( self : Any , _lowerCAmelCase : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : int = -1 , _lowerCAmelCase : bool = False , _lowerCAmelCase : Optional["TensorType"] = None , _lowerCAmelCase : int = 3 , _lowerCAmelCase : int = 40 , _lowerCAmelCase : int = 40 , _lowerCAmelCase : "PreTrainedTokenizerBase" = None , ):
SCREAMING_SNAKE_CASE_ = super().generate_dummy_inputs(preprocessor=_lowerCAmelCase , framework=_lowerCAmelCase )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs | 350 |
import random
from typing import Any
def UpperCAmelCase_ ( __UpperCAmelCase : list ) -> list[Any]:
for _ in range(len(__UpperCAmelCase ) ):
SCREAMING_SNAKE_CASE_ = random.randint(0 , len(__UpperCAmelCase ) - 1 )
SCREAMING_SNAKE_CASE_ = random.randint(0 , len(__UpperCAmelCase ) - 1 )
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = data[b], data[a]
return data
if __name__ == "__main__":
lowerCamelCase__ : Union[str, Any] = [0, 1, 2, 3, 4, 5, 6, 7]
lowerCamelCase__ : Optional[int] = ['python', 'says', 'hello', '!']
print('Fisher-Yates Shuffle:')
print('List', integers, strings)
print('FY Shuffle', fisher_yates_shuffle(integers), fisher_yates_shuffle(strings)) | 210 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.