# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
description = "Create a default config file for Accelerate with only a few flags set."


def write_basic_config(mixed_precision="no", save_location: str = default_json_config_file, use_xpu: bool = False):
    """Create and save a basic cluster config for the local machine, returning the path (or False if one exists)."""
    path = Path(save_location)
    path.parent.mkdir(parents=True, exist_ok=True)
    if path.exists():
        print(
            f"Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`."
        )
        return False
    mixed_precision = mixed_precision.lower()
    if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
        raise ValueError(
            f"`mixed_precision` should be one of 'no', 'fp16', 'bf16', or 'fp8'. Received {mixed_precision}"
        )

    config = {
        "compute_environment": "LOCAL_MACHINE",
        "mixed_precision": mixed_precision,
    }
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
        config["num_processes"] = num_gpus
        config["use_cpu"] = False
        if num_gpus > 1:
            config["distributed_type"] = "MULTI_GPU"
        else:
            config["distributed_type"] = "NO"
    elif is_xpu_available() and use_xpu:
        num_xpus = torch.xpu.device_count()
        config["num_processes"] = num_xpus
        config["use_cpu"] = False
        if num_xpus > 1:
            config["distributed_type"] = "MULTI_XPU"
        else:
            config["distributed_type"] = "NO"
    elif is_npu_available():
        num_npus = torch.npu.device_count()
        config["num_processes"] = num_npus
        config["use_cpu"] = False
        if num_npus > 1:
            config["distributed_type"] = "MULTI_NPU"
        else:
            config["distributed_type"] = "NO"
    else:
        num_gpus = 0
        config["use_cpu"] = True
        config["num_processes"] = 1
        config["distributed_type"] = "NO"

    config = ClusterConfig(**config)
    config.to_json_file(path)
    return path


def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
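A minimal usage sketch, assuming this module is importable from an installed `accelerate` (the temporary path below is illustrative):

    from accelerate.commands.config.default import write_basic_config

    # Writes a single-machine bf16 config unless the file already exists.
    result = write_basic_config(mixed_precision="bf16", save_location="/tmp/accelerate_config.json")
    print(result)  # the new Path, or False if a config was already present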
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        image_size=224,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
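For orientation, a minimal standalone sketch of the behavior these tests pin down (assumes `transformers` and `Pillow` are installed; the 18x18 size mirrors the tester defaults):

    from PIL import Image
    from transformers import ViTImageProcessor

    processor = ViTImageProcessor(size={"height": 18, "width": 18})
    batch = processor(Image.new("RGB", (40, 30)), return_tensors="pt")
    print(batch.pixel_values.shape)  # torch.Size([1, 3, 18, 18])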
"""simple docstring"""
import os
import shutil
import sys
import tempfile
import unittest
from pathlib import Path
import pytest
import transformers
from transformers import (
    BERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
    GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP,
    AutoTokenizer,
    BertConfig,
    BertTokenizer,
    BertTokenizerFast,
    CTRLTokenizer,
    GPT2Tokenizer,
    GPT2TokenizerFast,
    PreTrainedTokenizerFast,
    RobertaTokenizer,
    RobertaTokenizerFast,
    is_tokenizers_available,
)
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.auto.tokenization_auto import (
    TOKENIZER_MAPPING,
    get_tokenizer_config,
    tokenizer_class_from_name,
)
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import (
    DUMMY_DIFF_TOKENIZER_IDENTIFIER,
    DUMMY_UNKNOWN_IDENTIFIER,
    SMALL_MODEL_IDENTIFIER,
    RequestCounter,
    require_tokenizers,
    slow,
)
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
    from test_module.custom_tokenization_fast import CustomTokenizerFast


class AutoTokenizerTest(unittest.TestCase):
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0

    @slow
    def test_tokenizer_from_pretrained(self):
        for model_name in (x for x in BERT_PRETRAINED_CONFIG_ARCHIVE_MAP.keys() if "japanese" not in x):
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
            self.assertGreater(len(tokenizer), 0)

        for model_name in GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP.keys():
            tokenizer = AutoTokenizer.from_pretrained(model_name)
            self.assertIsNotNone(tokenizer)
            self.assertIsInstance(tokenizer, (GPT2Tokenizer, GPT2TokenizerFast))
            self.assertGreater(len(tokenizer), 0)

    def test_tokenizer_from_pretrained_identifier(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_model_type(self):
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER)
        self.assertIsInstance(tokenizer, (RobertaTokenizer, RobertaTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 20)

    def test_tokenizer_from_tokenizer_class(self):
        config = AutoConfig.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER)
        self.assertIsInstance(config, RobertaConfig)
        # Check that tokenizer_type ≠ model_type
        tokenizer = AutoTokenizer.from_pretrained(DUMMY_DIFF_TOKENIZER_IDENTIFIER, config=config)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        self.assertEqual(tokenizer.vocab_size, 12)

    def test_tokenizer_from_type(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert", use_fast=False)
            self.assertIsInstance(tokenizer, BertTokenizer)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2", use_fast=False)
            self.assertIsInstance(tokenizer, GPT2Tokenizer)

    @require_tokenizers
    def test_tokenizer_from_type_fast(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.txt", os.path.join(tmp_dir, "vocab.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="bert")
            self.assertIsInstance(tokenizer, BertTokenizerFast)

        with tempfile.TemporaryDirectory() as tmp_dir:
            shutil.copy("./tests/fixtures/vocab.json", os.path.join(tmp_dir, "vocab.json"))
            shutil.copy("./tests/fixtures/merges.txt", os.path.join(tmp_dir, "merges.txt"))

            tokenizer = AutoTokenizer.from_pretrained(tmp_dir, tokenizer_type="gpt2")
            self.assertIsInstance(tokenizer, GPT2TokenizerFast)

    def test_tokenizer_from_type_incorrect_name(self):
        with pytest.raises(ValueError):
            AutoTokenizer.from_pretrained("./", tokenizer_type="xxx")

    @require_tokenizers
    def test_tokenizer_identifier_with_correct_config(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            tokenizer = tokenizer_class.from_pretrained("wietsedv/bert-base-dutch-cased")
            self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))

            if isinstance(tokenizer, BertTokenizer):
                self.assertEqual(tokenizer.basic_tokenizer.do_lower_case, False)
            else:
                self.assertEqual(tokenizer.do_lower_case, False)
            self.assertEqual(tokenizer.model_max_length, 512)

    @require_tokenizers
    def test_tokenizer_identifier_non_existent(self):
        for tokenizer_class in [BertTokenizer, BertTokenizerFast, AutoTokenizer]:
            with self.assertRaisesRegex(
                EnvironmentError,
                "julien-c/herlolip-not-exists is not a local folder and is not a valid model identifier",
            ):
                _ = tokenizer_class.from_pretrained("julien-c/herlolip-not-exists")

    def test_model_name_edge_cases_in_mappings(self):
        tokenizers = TOKENIZER_MAPPING.values()
        tokenizer_names = []

        for slow_tok, fast_tok in tokenizers:
            if slow_tok is not None:
                tokenizer_names.append(slow_tok.__name__)

            if fast_tok is not None:
                tokenizer_names.append(fast_tok.__name__)

        for tokenizer_name in tokenizer_names:
            # must find the right class
            tokenizer_class_from_name(tokenizer_name)

    @require_tokenizers
    def test_from_pretrained_use_fast_toggle(self):
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased", use_fast=False), BertTokenizer)
        self.assertIsInstance(AutoTokenizer.from_pretrained("bert-base-cased"), BertTokenizerFast)

    @require_tokenizers
    def test_do_lower_case(self):
        tokenizer = AutoTokenizer.from_pretrained("distilbert-base-uncased", do_lower_case=False)
        sample = "Hello, world. How are you?"
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

        tokenizer = AutoTokenizer.from_pretrained("microsoft/mpnet-base", do_lower_case=False)
        tokens = tokenizer.tokenize(sample)
        self.assertEqual("[UNK]", tokens[0])

    @require_tokenizers
    def test_PreTrainedTokenizerFast_from_pretrained(self):
        tokenizer = AutoTokenizer.from_pretrained("robot-test/dummy-tokenizer-fast-with-model-config")
        self.assertEqual(type(tokenizer), PreTrainedTokenizerFast)
        self.assertEqual(tokenizer.model_max_length, 512)
        self.assertEqual(tokenizer.vocab_size, 30000)
        self.assertEqual(tokenizer.unk_token, "[UNK]")
        self.assertEqual(tokenizer.padding_side, "right")
        self.assertEqual(tokenizer.truncation_side, "right")

    def test_auto_tokenizer_from_local_folder(self):
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(tokenizer, (BertTokenizer, BertTokenizerFast))
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            tokenizer2 = AutoTokenizer.from_pretrained(tmp_dir)

        self.assertIsInstance(tokenizer2, tokenizer.__class__)
        self.assertEqual(tokenizer2.vocab_size, 12)

    def test_auto_tokenizer_fast_no_slow(self):
        tokenizer = AutoTokenizer.from_pretrained("ctrl")
        # There is no fast CTRL so this always gives us a slow tokenizer.
        self.assertIsInstance(tokenizer, CTRLTokenizer)

    def test_get_tokenizer_config(self):
        # Check we can load the tokenizer config of an online model.
        config = get_tokenizer_config("bert-base-cased")
        _ = config.pop("_commit_hash", None)
        # If we ever update bert-base-cased tokenizer config, this dict here will need to be updated.
        self.assertEqual(config, {"do_lower_case": False})

        # This model does not have a tokenizer_config so we get back an empty dict.
        config = get_tokenizer_config(SMALL_MODEL_IDENTIFIER)
        self.assertDictEqual(config, {})

        # A tokenizer saved with `save_pretrained` always creates a tokenizer config.
        tokenizer = AutoTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            config = get_tokenizer_config(tmp_dir)

        # Check the class of the tokenizer was properly saved (note that it always saves the slow class).
        self.assertEqual(config["tokenizer_class"], "BertTokenizer")

    def test_new_tokenizer_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, slow_tokenizer_class=BertTokenizer)

            tokenizer = CustomTokenizer.from_pretrained(SMALL_MODEL_IDENTIFIER)
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    @require_tokenizers
    def test_new_tokenizer_fast_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)

            # Can register in two steps
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=CustomTokenizer)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, None))
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=CustomTokenizerFast)
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            del TOKENIZER_MAPPING._extra_content[CustomConfig]
            # Can register in one step
            AutoTokenizer.register(
                CustomConfig, slow_tokenizer_class=CustomTokenizer, fast_tokenizer_class=CustomTokenizerFast
            )
            self.assertEqual(TOKENIZER_MAPPING[CustomConfig], (CustomTokenizer, CustomTokenizerFast))

            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoTokenizer.register(BertConfig, fast_tokenizer_class=BertTokenizerFast)

            # We pass through a bert tokenizer fast cause there is no converter slow to fast for our new tokenizer
            # and that model does not have a tokenizer.json
            with tempfile.TemporaryDirectory() as tmp_dir:
                bert_tokenizer = BertTokenizerFast.from_pretrained(SMALL_MODEL_IDENTIFIER)
                bert_tokenizer.save_pretrained(tmp_dir)
                tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir)
                self.assertIsInstance(new_tokenizer, CustomTokenizerFast)

                new_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, use_fast=False)
                self.assertIsInstance(new_tokenizer, CustomTokenizer)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )

        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True)
        self.assertTrue(tokenizer.special_attribute_present)
        # Test tokenizer can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir)
            reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertTrue(reloaded_tokenizer.special_attribute_present)

        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            # Test tokenizer can be reloaded.
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer.save_pretrained(tmp_dir)
                reloaded_tokenizer = AutoTokenizer.from_pretrained(tmp_dir, trust_remote_code=True, use_fast=False)
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(reloaded_tokenizer.special_attribute_present)
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertEqual(reloaded_tokenizer.__class__.__name__, "NewTokenizer")

    @require_tokenizers
    def test_from_pretrained_dynamic_tokenizer_conflict(self):
        class NewTokenizer(BertTokenizer):
            special_attribute_present = False

        class NewTokenizerFast(BertTokenizerFast):
            slow_tokenizer_class = NewTokenizer
            special_attribute_present = False

        try:
            AutoConfig.register("custom", CustomConfig)
            AutoTokenizer.register(CustomConfig, slow_tokenizer_class=NewTokenizer)
            AutoTokenizer.register(CustomConfig, fast_tokenizer_class=NewTokenizerFast)
            # If remote code is not set, the default is to use local
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer")
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/test_dynamic_tokenizer", use_fast=False)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote code is disabled, we load the local one.
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertFalse(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=False, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertFalse(tokenizer.special_attribute_present)

            # If remote is enabled, we load from the Hub
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")
            self.assertTrue(tokenizer.special_attribute_present)
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer", trust_remote_code=True, use_fast=False
            )
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
            self.assertTrue(tokenizer.special_attribute_present)

        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]

    def test_from_pretrained_dynamic_tokenizer_legacy_format(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True
        )
        self.assertTrue(tokenizer.special_attribute_present)
        if is_tokenizers_available():
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizerFast")

            # Test we can also load the slow version
            tokenizer = AutoTokenizer.from_pretrained(
                "hf-internal-testing/test_dynamic_tokenizer_legacy", trust_remote_code=True, use_fast=False
            )
            self.assertTrue(tokenizer.special_attribute_present)
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")
        else:
            self.assertEqual(tokenizer.__class__.__name__, "NewTokenizer")

    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"
        ):
            _ = AutoTokenizer.from_pretrained("bert-base")

    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"
        ):
            _ = AutoTokenizer.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")

    def test_cached_tokenizer_has_minimum_calls_to_head(self):
        # Make sure we have cached the tokenizer.
        _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
        with RequestCounter() as counter:
            _ = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            self.assertEqual(counter.get_request_count, 0)
            self.assertEqual(counter.head_request_count, 1)
            self.assertEqual(counter.other_request_count, 0)
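A compact sketch of the registration API exercised above (the custom classes here are illustrative stand-ins for the test fixtures):

    from transformers import AutoConfig, AutoTokenizer, BertTokenizer, PretrainedConfig

    class MyConfig(PretrainedConfig):
        model_type = "my-model"

    class MyTokenizer(BertTokenizer):
        pass

    AutoConfig.register("my-model", MyConfig)
    AutoTokenizer.register(MyConfig, slow_tokenizer_class=MyTokenizer)
    # AutoTokenizer.from_pretrained(...) now resolves checkpoints whose config is a MyConfig.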
"""simple docstring"""
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the current player in a full binary game tree."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
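Worked check with illustrative numbers: for scores [3, 5, 2, 9] the tree has height log2(4) = 2, the two minimizers yield min(3, 5) = 3 and min(2, 9) = 2, so the root maximizer returns 3:

    assert minimax(0, 0, True, [3, 5, 2, 9], 2) == 3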
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
    from PIL import Image


class ImageSegmentationTool(PipelineTool):
    description = (
        "This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image. "
        "It takes two arguments named `image` which should be the original image, and `label` which should be a text "
        "describing the elements what should be identified in the segmentation mask. The tool returns the mask."
    )
    default_checkpoint = "CIDAS/clipseg-rd64-refined"
    name = "image_segmenter"
    model_class = CLIPSegForImageSegmentation

    inputs = ["image", "text"]
    outputs = ["image"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)

    def encode(self, image: "Image", label: str):
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")

    def forward(self, inputs):
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits

    def decode(self, outputs):
        array = outputs.cpu().detach().numpy()
        array[array < 0] = 0  # background
        array[array > 0] = 1  # pixels matching the label
        return Image.fromarray((array * 255).astype(np.uint8))
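A usage sketch, assuming the tool class is importable from a transformers checkout (the image path is illustrative; model setup also happens lazily on the first call):

    from PIL import Image

    tool = ImageSegmentationTool()
    mask = tool(image=Image.open("cat.png"), label="cat")
    mask.save("cat_mask.png")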
def perfect(number: int) -> bool:
    """Return True if `number` equals the sum of its proper divisors."""
    return sum(i for i in range(1, number // 2 + 1) if number % i == 0) == number


if __name__ == "__main__":
    print("Program to check whether a number is a Perfect number or not...")
    number = int(input("Enter number: ").strip())
    print(f"{number} is {'' if perfect(number) else 'not '}a Perfect Number.")
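For example, 28 is perfect because its proper divisors sum to it: 1 + 2 + 4 + 7 + 14 = 28.

    assert perfect(28) and not perfect(27)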
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
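The client assumes a peer listening on the same port. A minimal matching server sketch (the source file name is illustrative):

    import socket

    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), 12312))
    server.listen(1)
    conn, _addr = server.accept()
    print(conn.recv(1024))  # b'Hello server!'
    with open("File_to_send", "rb") as in_file:
        conn.sendfile(in_file)
    conn.close()
    server.close()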
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
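Running the __main__ block merges the two sample tuples in sorted order and prints:

    -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10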
from ....utils import logging
logger = logging.get_logger(__name__)


class MMBTConfig:
    """Configuration that wraps the underlying transformer's config and adds the MMBT modal fields."""

    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
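Usage sketch: the config shares the wrapped config's attribute dict, so the transformer's fields stay visible alongside the modal ones (the checkpoint name is illustrative):

    from transformers import AutoConfig

    base = AutoConfig.from_pretrained("bert-base-uncased")
    config = MMBTConfig(base, num_labels=2, modal_hidden_size=2048)
    print(config.hidden_size, config.num_labels, config.modal_hidden_size)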
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007


def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))


def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:
    return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)


if __name__ == "__main__":

    def benchmark() -> None:
        from timeit import timeit

        print("Without Numpy")
        print(
            timeit(
                "euclidean_distance_no_np([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )
        print("With Numpy")
        print(
            timeit(
                "euclidean_distance([1, 2, 3], [4, 5, 6])",
                number=10000,
                globals=globals(),
            )
        )

    benchmark()
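Quick check: for [1, 2, 3] and [4, 5, 6] each coordinate differs by 3, so the distance is sqrt(3 * 9) = sqrt(27) ≈ 5.196:

    assert abs(euclidean_distance([1, 2, 3], [4, 5, 6]) - 27**0.5) < 1e-12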
"""simple docstring"""
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")

    def parse_bool(string):
        if string == "True":
            return True
        elif string == "False":
            return False
        else:
            raise ValueError(f"could not parse string as bool {string}")

    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)

    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )

    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
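The script is normally driven from the command line; programmatically the same conversion can be sketched like this (the checkpoint and YAML paths are illustrative):

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path="control_sd15_canny.pth",
        original_config_file="cldm_v15.yaml",
        device="cpu",
    )
    controlnet.save_pretrained("converted-controlnet")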
"""simple docstring"""
import copy
import os

import cv2
import numpy as np
from matplotlib import pyplot as plt


class ConstantStretch:
    def __init__(self):
        self.img = ""
        self.original_image = ""
        self.last_list = []
        self.rem = 0
        self.L = 256
        self.sk = 0
        self.k = 0
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        self.img = cv2.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k
            self.sk += prk
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
            self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
            self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cv2.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        cv2.imshow("Output-Image", self.img)
        cv2.imshow("Input-Image", self.original_image)
        cv2.waitKey(5000)
        cv2.destroyAllWindows()


if __name__ == "__main__":
    file_path = os.path.join(os.path.basename(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()

        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)

                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)

                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias

        return logit

    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]

            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()

                    if indices_i.numel() == 0:
                        continue

                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out

    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias

                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)

                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)

            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]

                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]

                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)

                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i

            return out
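A shape-level usage sketch (the vocabulary size, cutoffs, and dimensions are illustrative):

    vocab, d = 1000, 32
    crit = ProjectedAdaptiveLogSoftmax(n_token=vocab, d_embed=d, d_proj=d, cutoffs=[100, 500], div_val=1)
    hidden = torch.randn(4, 7, d)            # (batch, seq, d_proj)
    labels = torch.randint(0, vocab, (4, 7))
    losses = crit(hidden, labels)            # per-token negative log-likelihoods
    print(losses.shape)                      # flattened (batch * (seq - 1),) after the shift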
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CVT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
    # See all Cvt models at https://huggingface.co/models?filter=cvt
}


class CvtConfig(PretrainedConfig):
    model_type = "cvt"

    def __init__(
        self,
        num_channels=3,
        patch_sizes=[7, 3, 3],
        patch_stride=[4, 2, 2],
        patch_padding=[2, 1, 1],
        embed_dim=[64, 192, 384],
        num_heads=[1, 3, 6],
        depth=[1, 2, 10],
        mlp_ratio=[4.0, 4.0, 4.0],
        attention_drop_rate=[0.0, 0.0, 0.0],
        drop_rate=[0.0, 0.0, 0.0],
        drop_path_rate=[0.0, 0.0, 0.1],
        qkv_bias=[True, True, True],
        cls_token=[False, False, True],
        qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"],
        kernel_qkv=[3, 3, 3],
        padding_kv=[1, 1, 1],
        stride_kv=[2, 2, 2],
        padding_q=[1, 1, 1],
        stride_q=[1, 1, 1],
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
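Instantiation sketch showing the per-stage list convention (the shallower variant is hypothetical):

    config = CvtConfig()               # three stages by default
    print(config.embed_dim)            # [64, 192, 384]
    small = CvtConfig(depth=[1, 1, 2])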
import argparse
import json
import os
from tensorflow.core.protobuf.saved_model_pb2 import SavedModel


# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_tf_ops.py
REPO_PATH = "."

# Internal TensorFlow ops that can be safely ignored (mostly specific to a saved model)
INTERNAL_OPS = [
    "Assert",
    "AssignVariableOp",
    "EmptyTensorList",
    "MergeV2Checkpoints",
    "ReadVariableOp",
    "ResourceGather",
    "RestoreV2",
    "SaveV2",
    "ShardedFilename",
    "StatefulPartitionedCall",
    "StaticRegexFullMatch",
    "VarHandleOp",
]


def onnx_compliancy(saved_model_path, strict, opset):
    saved_model = SavedModel()
    onnx_ops = []

    with open(os.path.join(REPO_PATH, "utils", "tf_ops", "onnx.json")) as f:
        onnx_opsets = json.load(f)["opsets"]

    for i in range(1, opset + 1):
        onnx_ops.extend(onnx_opsets[str(i)])

    with open(saved_model_path, "rb") as f:
        saved_model.ParseFromString(f.read())

    model_op_names = set()

    # Iterate over every metagraph in case there is more than one (a saved model can contain multiple graphs)
    for meta_graph in saved_model.meta_graphs:
        # Add operations in the graph definition
        model_op_names.update(node.op for node in meta_graph.graph_def.node)

        # Go through the functions in the graph definition
        for func in meta_graph.graph_def.library.function:
            # Add operations in each function
            model_op_names.update(node.op for node in func.node_def)

    # Convert to list, sorted if you want
    model_op_names = sorted(model_op_names)
    incompatible_ops = []

    for op in model_op_names:
        if op not in onnx_ops and op not in INTERNAL_OPS:
            incompatible_ops.append(op)

    if strict and len(incompatible_ops) > 0:
        raise Exception(
            f"Found the following incompatible ops for the opset {opset}:\n" + "\n".join(incompatible_ops)
        )
    elif len(incompatible_ops) > 0:
        print(f"Found the following incompatible ops for the opset {opset}:")
        print(*incompatible_ops, sep="\n")
    else:
        print(f"The saved model {saved_model_path} can properly be converted with ONNX.")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--saved_model_path", help="Path of the saved model to check (the .pb file).")
    parser.add_argument(
        "--opset", default=12, type=int, help="The ONNX opset against which the model has to be tested."
    )
    parser.add_argument(
        "--framework", choices=["onnx"], default="onnx", help="Frameworks against which to test the saved model."
    )
    parser.add_argument(
        "--strict", action="store_true", help="Whether make the checking strict (raise errors) or not (raise warnings)"
    )
    args = parser.parse_args()

    if args.framework == "onnx":
        onnx_compliancy(args.saved_model_path, args.strict, args.opset)
from manim import *


class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(4)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.move_to([-1, -1, 0])
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])
        self.add(model)

        model_arr = []
        model_cpu_arr = []
        for i, rect in enumerate(model_base):
            target = fill.copy().set_fill(BLUE, opacity=0.8)
            target.move_to(rect)
            model_arr.append(target)
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(BLUE, opacity=0.8)
            cpu_target.move_to(cpu_left_col_base[i])
            model_cpu_arr.append(cpu_target)
        self.add(*model_arr, *model_cpu_arr)

        disk_left_col_base = [meta_mem.copy() for i in range(6)]
        disk_right_col_base = [meta_mem.copy() for i in range(6)]
        disk_left_col = VGroup(*disk_left_col_base).arrange(UP, buff=0)
        disk_right_col = VGroup(*disk_right_col_base).arrange(UP, buff=0)
        disk_rects = VGroup(disk_left_col, disk_right_col).arrange(RIGHT, buff=0)
        disk_text = Text("Disk", font_size=24)
        disk = Group(disk_rects, disk_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        disk.move_to([-4, -1.25, 0])
        self.add(disk_text, disk_rects)

        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18
        )
        key_text.move_to([-5, 2.4, 0])
        self.add(key_text, key)

        blue_text = MarkupText(
            f"<span fgcolor='{BLUE}'>●</span> Checkpoint", font_size=18
        )
        blue_text.next_to(key_text, DOWN * 2.4, aligned_edge=key_text.get_left())
        self.add(blue_text)

        step_1 = MarkupText(
            f"Now watch as an input is passed through the model\nand how the memory is utilized and handled.",
            font_size=24,
        )
        step_1.move_to([2, 2, 0])
        self.play(Write(step_1))

        input = Square(0.3)
        input.set_fill(RED, opacity=1.0)
        input.set_stroke(width=0.0)
        input.next_to(model_base[0], LEFT, buff=0.5)
        self.play(Write(input))

        input.generate_target()
        input.target.next_to(model_arr[0], direction=LEFT, buff=0.02)
        self.play(MoveToTarget(input))
        self.play(FadeOut(step_1))

        a = Arrow(start=UP, end=DOWN, color=RED, buff=0.5)
        a.next_to(model_arr[0].get_left(), UP, buff=0.2)

        model_cpu_arr[0].generate_target()
        model_cpu_arr[0].target.move_to(gpu_rect[0])

        step_2 = MarkupText(
            f"As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.",
            font_size=24,
        )
        step_2.move_to([2, 2, 0])
        self.play(Write(step_2, run_time=3))

        circ_kwargs = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.02}
        self.play(
            Write(a),
            Circumscribe(model_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(model_cpu_arr[0], color=ORANGE, **circ_kwargs),
            Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
        )
        self.play(MoveToTarget(model_cpu_arr[0]))

        a_c = a.copy()
        for i in range(6):
            a_c.next_to(model_arr[i].get_right() + 0.02, UP, buff=0.2)
            input.generate_target()
            input.target.move_to(model_arr[i].get_right() + 0.02)

            grp = AnimationGroup(
                FadeOut(a, run_time=0.5), MoveToTarget(input, run_time=0.5), FadeIn(a_c, run_time=0.5), lag_ratio=0.2
            )
            self.play(grp)

            model_cpu_arr[i].generate_target()
            model_cpu_arr[i].target.move_to(cpu_left_col_base[i])

            if i < 5:
                model_cpu_arr[i + 1].generate_target()
                model_cpu_arr[i + 1].target.move_to(gpu_rect[0])
                if i >= 1:
                    circ_kwargs["run_time"] = 0.7
                self.play(
                    Circumscribe(model_arr[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i], **circ_kwargs),
                    Circumscribe(cpu_left_col_base[i + 1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                    Circumscribe(model_arr[i + 1], color=ORANGE, **circ_kwargs),
                )
                if i < 1:
                    self.play(
                        MoveToTarget(model_cpu_arr[i]), MoveToTarget(model_cpu_arr[i + 1]),
                    )
                else:
                    self.play(
                        MoveToTarget(model_cpu_arr[i], run_time=0.7), MoveToTarget(model_cpu_arr[i + 1], run_time=0.7),
                    )
            else:
                model_cpu_arr[i].generate_target()
                model_cpu_arr[i].target.move_to(cpu_left_col_base[-1])
                input.generate_target()
                input.target.next_to(model_arr[-1].get_right(), RIGHT + 0.02, buff=0.2)

                self.play(
                    Circumscribe(model_arr[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(cpu_left_col_base[-1], color=ORANGE, **circ_kwargs),
                    Circumscribe(gpu_rect[0], color=ORANGE, **circ_kwargs),
                )
                self.play(MoveToTarget(model_cpu_arr[i]))

        a = a_c
        a_c = a_c.copy()

        input.generate_target()
        input.target.next_to(model_base[-1], RIGHT + 0.02, buff=0.5)
        self.play(
            FadeOut(a), FadeOut(a_c, run_time=0.5),
        )

        step_3 = MarkupText(f"Inference on a model too large for GPU memory\nis successfully completed.", font_size=24)
        step_3.move_to([2, 2, 0])
        self.play(Write(step_3, run_time=3), MoveToTarget(input))
        self.wait()
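Assuming the scene is saved in a file such as stage_5.py (file name hypothetical), it renders with manim's standard CLI:

# manim -pql stage_5.py Stage5   # -p: open a preview, -ql: low quality for fast iteration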
| 44 | 0 |
import unittest
from pathlib import Path
from tempfile import NamedTemporaryFile, TemporaryDirectory

from transformers import BertConfig, BertTokenizerFast, FeatureExtractionPipeline
from transformers.convert_graph_to_onnx import (
    convert,
    ensure_valid_input,
    generate_identified_filename,
    infer_shapes,
    quantize,
)
from transformers.testing_utils import require_tf, require_tokenizers, require_torch, slow


class FuncContiguousArgs:
    def forward(self, input_ids, token_type_ids, attention_mask):
        return None


class FuncNonContiguousArgs:
    def forward(self, input_ids, some_other_args, token_type_ids, attention_mask):
        return None


class OnnxExportTestCase(unittest.TestCase):
    MODEL_TO_TEST = [
        # (model_name, model_kwargs)
        ("bert-base-cased", {}),
        ("gpt2", {"use_cache": False}),  # We don't support exporting GPT2 past keys anymore
    ]

    @require_tf
    @slow
    def test_export_tensorflow(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "tf", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            self._test_export(model, "pt", 12, **model_kwargs)

    @require_torch
    @slow
    def test_export_custom_bert_model(self):
        from transformers import BertModel

        vocab = ["[UNK]", "[SEP]", "[CLS]", "[PAD]", "[MASK]", "some", "other", "words"]
        with NamedTemporaryFile(mode="w+t") as vocab_file:
            vocab_file.write("\n".join(vocab))
            vocab_file.flush()
            tokenizer = BertTokenizerFast(vocab_file.name)

            with TemporaryDirectory() as bert_save_dir:
                model = BertModel(BertConfig(vocab_size=len(vocab)))
                model.save_pretrained(bert_save_dir)
                self._test_export(bert_save_dir, "pt", 12, tokenizer)

    @require_tf
    @slow
    def test_quantize_tf(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "tf", 12, **model_kwargs)
            quantized_path = quantize(Path(path))

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    @require_torch
    @slow
    def test_quantize_pytorch(self):
        for model, model_kwargs in OnnxExportTestCase.MODEL_TO_TEST:
            path = self._test_export(model, "pt", 12, **model_kwargs)
            quantized_path = quantize(path)

            # Ensure the actual quantized model is not bigger than the original one
            if quantized_path.stat().st_size >= Path(path).stat().st_size:
                self.fail("Quantized model is bigger than initial ONNX model")

    def _test_export(self, model, framework, opset, tokenizer=None, **model_kwargs):
        try:
            # Compute path
            with TemporaryDirectory() as tempdir:
                path = Path(tempdir).joinpath("model.onnx")

            # Remove folder if exists
            if path.parent.exists():
                path.parent.rmdir()

            # Export
            convert(framework, model, path, opset, tokenizer, **model_kwargs)

            return path
        except Exception as e:
            self.fail(e)

    @require_torch
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_pytorch(self):
        from transformers import BertModel

        model = BertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "pt")

    @require_tf
    @require_tokenizers
    @slow
    def test_infer_dynamic_axis_tf(self):
        from transformers import TFBertModel

        model = TFBertModel(BertConfig.from_pretrained("lysandre/tiny-bert-random"))
        tokenizer = BertTokenizerFast.from_pretrained("lysandre/tiny-bert-random")
        self._test_infer_dynamic_axis(model, tokenizer, "tf")

    def _test_infer_dynamic_axis(self, model, tokenizer, framework):
        nlp = FeatureExtractionPipeline(model, tokenizer)

        variable_names = ["input_ids", "token_type_ids", "attention_mask", "output_0", "output_1"]
        input_vars, output_vars, shapes, tokens = infer_shapes(nlp, framework)

        # Assert all variables are present
        self.assertEqual(len(shapes), len(variable_names))
        self.assertTrue(all(var_name in shapes for var_name in variable_names))
        self.assertSequenceEqual(variable_names[:3], input_vars)
        self.assertSequenceEqual(variable_names[3:], output_vars)

        # Assert inputs are {0: batch, 1: sequence}
        for var_name in ["input_ids", "token_type_ids", "attention_mask"]:
            self.assertDictEqual(shapes[var_name], {0: "batch", 1: "sequence"})

        # Assert outputs are {0: batch, 1: sequence} and {0: batch}
        self.assertDictEqual(shapes["output_0"], {0: "batch", 1: "sequence"})
        self.assertDictEqual(shapes["output_1"], {0: "batch"})

    def test_ensure_valid_input(self):
        # Expected input variable names, in the order of the model's forward signature
        input_names = ["input_ids", "attention_mask", "token_type_ids"]
        tokens = {"input_ids": [1, 2, 3, 4], "attention_mask": [0, 0, 0, 0], "token_type_ids": [1, 1, 1, 1]}

        ordered_input_names, inputs_args = ensure_valid_input(FuncContiguousArgs(), tokens, input_names)

        # Should have exactly the same number of args (all are valid)
        self.assertEqual(len(inputs_args), 3)

        # Should have exactly the same input names
        self.assertEqual(set(ordered_input_names), set(input_names))

        # Parameters should be reordered according to their respective place in the function:
        # (input_ids, token_type_ids, attention_mask)
        self.assertEqual(inputs_args, (tokens["input_ids"], tokens["token_type_ids"], tokens["attention_mask"]))

        # Generated args are interleaved with other args (for instance the "past" parameter in GPT2)
        ordered_input_names, inputs_args = ensure_valid_input(FuncNonContiguousArgs(), tokens, input_names)

        # Should have exactly one arg (all before the one not provided, "some_other_args")
        self.assertEqual(len(inputs_args), 1)
        self.assertEqual(len(ordered_input_names), 1)

        # Should have only "input_ids"
        self.assertEqual(inputs_args[0], tokens["input_ids"])
        self.assertEqual(ordered_input_names[0], "input_ids")

    def test_generate_identified_filename(self):
        generated = generate_identified_filename(Path("/home/something/my_fake_model.onnx"), "-test")
        self.assertEqual("/home/something/my_fake_model-test.onnx", generated.as_posix())
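All of these export tests are decorated with @slow, so a default pytest run skips them; a sketch of the usual invocation (the test-file path is an assumption based on the transformers layout):

# RUN_SLOW=1 python -m pytest tests/test_onnx.py -s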
| 300 |
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=True):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1_337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
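Test scripts like this one are meant to be executed through the accelerate launcher so that the distributed branches actually run; a sketch, assuming the script is saved as test_sync.py (file name is an assumption):

# accelerate launch --num_processes 2 test_sync.py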
| 221 | 0 |
"""simple docstring"""
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class __lowerCAmelCase ( A_):
_lowercase : Optional[int] = """openai/whisper-base"""
_lowercase : str = (
"""This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
"""transcribed text."""
)
_lowercase : int = """transcriber"""
_lowercase : Any = WhisperProcessor
_lowercase : str = WhisperForConditionalGeneration
_lowercase : Tuple = ["""audio"""]
_lowercase : Union[str, Any] = ["""text"""]
def _lowercase ( self , lowerCAmelCase__ ) -> Union[str, Any]:
'''simple docstring'''
return self.pre_processor(_lowerCamelCase , return_tensors="pt" ).input_features
def _lowercase ( self , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
return self.model.generate(inputs=_lowerCamelCase )
def _lowercase ( self , lowerCAmelCase__ ) -> Dict:
'''simple docstring'''
return self.pre_processor.batch_decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )[0]
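A minimal usage sketch, assuming a local audio file the WhisperProcessor accepts (the agent framework normally instantiates tools itself; `PipelineTool.__call__` chains encode, forward, and decode):

# tool = SpeechToTextTool()
# transcript = tool("sample.flac")  # hypothetical audio path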
| 368 |
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return all primes up to and including `num` using the sieve of Eratosthenes."""
    if num <= 0:
        message = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(message)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)

            # Set multiples of start to False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime


if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
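A quick sanity check of the function (the sieve runs in O(n log log n) time and O(n) space):

# prime_sieve(10) == [2, 3, 5, 7]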
| 148 | 0 |
def move_tower(height, from_pole, to_pole, with_pole):
    if height >= 1:
        move_tower(height - 1, from_pole, with_pole, to_pole)
        move_disk(from_pole, to_pole)
        move_tower(height - 1, with_pole, to_pole, from_pole)


def move_disk(fp, tp):
    print("moving disk from", fp, "to", tp)


def main():
    height = int(input("Height of hanoi: ").strip())
    move_tower(height, "A", "B", "C")


if __name__ == "__main__":
    main()
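The recursion performs 2**height - 1 moves in total; for example:

# move_tower(2, "A", "B", "C") prints:
#   moving disk from A to C
#   moving disk from A to B
#   moving disk from C to B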
| 201 |
from __future__ import annotations


def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return

    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
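For reference, the expected output with the defaults above, the C(4, 2) = 6 combinations, one per line:

# 1 2
# 1 3
# 1 4
# 2 3
# 2 4
# 3 4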
| 201 | 1 |
import unittest

import numpy as np

from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
    is_onnx_available,
    load_image,
    nightly,
    require_onnxruntime,
    require_torch_gpu,
)

from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin


if is_onnx_available():
    import onnxruntime as ort


class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass


@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options

    def test_inpainting_default_ddim(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
| 39 |
# DISCLAIMER: This code is strongly influenced by https://github.com/pesser/pytorch_diffusion
# and https://github.com/hojonathanho/diffusion

import math
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin
from diffusers.utils import BaseOutput, deprecate


@dataclass
# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->DDIM
class DDIMSchedulerOutput(BaseOutput):
    prev_sample: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)


class DDIMInverseScheduler(SchedulerMixin, ConfigMixin):
    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_zero: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        clip_sample_range: float = 1.0,
        **kwargs,
    ):
        if kwargs.get("set_alpha_to_one", None) is not None:
            deprecation_message = (
                "The `set_alpha_to_one` argument is deprecated. Please use `set_alpha_to_zero` instead."
            )
            deprecate("set_alpha_to_one", "1.0.0", deprecation_message, standard_warn=False)
            set_alpha_to_zero = kwargs["set_alpha_to_one"]

        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step in inverted ddim, we are looking into the next alphas_cumprod.
        # For the final step, there is no next alphas_cumprod, and the index is out of bounds.
        # `set_alpha_to_zero` decides whether we set this parameter simply to zero
        # (in this case, self.step() just outputs the predicted noise)
        # or whether we use the final alpha of the "non-previous" one.
        self.final_alpha_cumprod = torch.tensor(0.0) if set_alpha_to_zero else self.alphas_cumprod[-1]

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps).copy().astype(np.int64))

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps
        step_ratio = self.config.num_train_timesteps // self.num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # casting to int to avoid issues when num_inference_step is power of 3
        timesteps = (np.arange(0, num_inference_steps) * step_ratio).round().copy().astype(np.int64)
        self.timesteps = torch.from_numpy(timesteps).to(device)
        self.timesteps += self.config.steps_offset

    def step(
        self,
        model_output: torch.FloatTensor,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[DDIMSchedulerOutput, Tuple]:
        # 1. get previous step value (=t+1)
        prev_timestep = timestep + self.config.num_train_timesteps // self.num_inference_steps

        # 2. compute alphas, betas
        # change original implementation to exactly match noise levels for analogous forward process
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep < self.config.num_train_timesteps
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t

        # 3. compute predicted original sample from predicted noise, also called
        # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
            pred_epsilon = model_output
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
            pred_epsilon = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
            pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
                " `v_prediction`"
            )

        # 4. Clip or threshold "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        # 5. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * pred_epsilon

        # 6. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction

        if not return_dict:
            return (prev_sample, pred_original_sample)
        return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)

    def __len__(self):
        return self.config.num_train_timesteps
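A minimal inversion sketch, assuming a trained `unet` and encoded `latents` with matching `prompt_embeds` (all three are placeholders, not defined in this file):

# inverse_scheduler = DDIMInverseScheduler(num_train_timesteps=1000)
# inverse_scheduler.set_timesteps(50)
# for t in inverse_scheduler.timesteps:
#     noise_pred = unet(latents, t, encoder_hidden_states=prompt_embeds).sample
#     latents = inverse_scheduler.step(noise_pred, t, latents).prev_sample  # walk clean latents toward noise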
| 39 | 1 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


_import_structure = {
    "configuration_maskformer": ["MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "MaskFormerConfig"],
    "configuration_maskformer_swin": ["MaskFormerSwinConfig"],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_maskformer"] = ["MaskFormerFeatureExtractor"]
    _import_structure["image_processing_maskformer"] = ["MaskFormerImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_maskformer"] = [
        "MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MaskFormerForInstanceSegmentation",
        "MaskFormerModel",
        "MaskFormerPreTrainedModel",
    ]
    _import_structure["modeling_maskformer_swin"] = [
        "MaskFormerSwinBackbone",
        "MaskFormerSwinModel",
        "MaskFormerSwinPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_maskformer import MASKFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskFormerConfig
    from .configuration_maskformer_swin import MaskFormerSwinConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_maskformer import MaskFormerFeatureExtractor
        from .image_processing_maskformer import MaskFormerImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_maskformer import (
            MASKFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            MaskFormerForInstanceSegmentation,
            MaskFormerModel,
            MaskFormerPreTrainedModel,
        )
        from .modeling_maskformer_swin import (
            MaskFormerSwinBackbone,
            MaskFormerSwinModel,
            MaskFormerSwinPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
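The lazy-module pattern above keeps top-level imports cheap: a sketch of the effect, assuming the usual transformers package layout:

# from transformers.models.maskformer import MaskFormerConfig  # cheap: no torch-heavy modeling code imported yet
# from transformers.models.maskformer import MaskFormerModel   # first access triggers the real submodule import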
| 314 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json",
}


class LxmertConfig(PretrainedConfig):
    model_type = "lxmert"
    attribute_map = {}

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_attention_heads=12,
        num_qa_labels=9500,
        num_object_labels=1600,
        num_attr_labels=400,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        l_layers=9,
        x_layers=5,
        r_layers=5,
        visual_feat_dim=2048,
        visual_pos_dim=4,
        visual_loss_normalizer=6.67,
        task_matched=True,
        task_mask_lm=True,
        task_obj_predict=True,
        task_qa=True,
        visual_obj_loss=True,
        visual_attr_loss=True,
        visual_feat_loss=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.num_qa_labels = num_qa_labels
        self.num_object_labels = num_object_labels
        self.num_attr_labels = num_attr_labels
        self.l_layers = l_layers
        self.x_layers = x_layers
        self.r_layers = r_layers
        self.visual_feat_dim = visual_feat_dim
        self.visual_pos_dim = visual_pos_dim
        self.visual_loss_normalizer = visual_loss_normalizer
        self.task_matched = task_matched
        self.task_mask_lm = task_mask_lm
        self.task_obj_predict = task_obj_predict
        self.task_qa = task_qa
        self.visual_obj_loss = visual_obj_loss
        self.visual_attr_loss = visual_attr_loss
        self.visual_feat_loss = visual_feat_loss
        self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers}
        super().__init__(**kwargs)
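A small usage sketch (the defaults reproduce the base architecture; any field can be overridden at construction time):

# config = LxmertConfig(num_qa_labels=2)
# config.num_hidden_layers  # -> {'vision': 5, 'cross_encoder': 5, 'language': 9}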
| 314 | 1 |
"""simple docstring"""
import math
UpperCAmelCase =10
UpperCAmelCase =7
UpperCAmelCase =BALLS_PER_COLOUR * NUM_COLOURS
def _A ( _a : int = 2_0 ):
"""simple docstring"""
A = math.comb(_a , _a )
A = math.comb(NUM_BALLS - BALLS_PER_COLOUR , _a )
A = NUM_COLOURS * (1 - missing_colour / total)
return f'{result:.9f}'
if __name__ == "__main__":
print(solution(20))
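The formula follows from linearity of expectation: for each of the 7 colours, the probability that none of its 10 balls is among the 20 drawn is C(60, 20) / C(70, 20), so the expected number of distinct colours is 7 * (1 - C(60, 20) / C(70, 20)), roughly 6.8187 with the parameters above:

# solution(20)  # -> "6.818741802"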
| 77 |
"""simple docstring"""
import pytest
UpperCAmelCase ="__dummy_dataset1__"
UpperCAmelCase ="\nimport json\nimport os\n\nimport datasets\n\n\nREPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"\nURLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}\n\n\nclass __DummyDataset1__(datasets.GeneratorBasedBuilder):\n\n def _info(self):\n features = datasets.Features(\n {\n \"tokens\": datasets.Sequence(datasets.Value(\"string\")),\n \"ner_tags\": datasets.Sequence(\n datasets.features.ClassLabel(\n names=[\n \"O\",\n \"B-PER\",\n \"I-PER\",\n \"B-ORG\",\n \"I-ORG\",\n \"B-LOC\",\n \"I-LOC\",\n ]\n )\n ),\n \"langs\": datasets.Sequence(datasets.Value(\"string\")),\n \"spans\": datasets.Sequence(datasets.Value(\"string\")),\n }\n )\n return datasets.DatasetInfo(features=features)\n\n def _split_generators(self, dl_manager):\n dl_path = dl_manager.download(URLS)\n return [\n datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),\n datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),\n ]\n\n def _generate_examples(self, filepath):\n with open(filepath, \"r\", encoding=\"utf-8\") as f:\n for i, line in enumerate(f):\n yield i, json.loads(line)\n"
@pytest.fixture
def _A ( ):
"""simple docstring"""
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def _A ( ):
"""simple docstring"""
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def _A ( _a : str , _a : List[Any] , _a : List[Any] ):
"""simple docstring"""
A = dataset_loading_script_name
A = tmp_path / """datasets""" / script_name
script_dir.mkdir(parents=_a )
A = script_dir / f'{script_name}.py'
with open(_a , """w""" ) as f:
f.write(_a )
return str(_a )
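A sketch of how the directory fixture is typically consumed in a test (the test body is illustrative; resolving a local script directory is standard `datasets.load_dataset` behaviour):

# def test_dummy_dataset_loads(dataset_loading_script_dir):
#     from datasets import load_dataset
#     ds = load_dataset(dataset_loading_script_dir, split="train")
#     assert "tokens" in ds.column_names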
| 77 | 1 |
import argparse
import gc
import json
import os
import re

import torch
from huggingface_hub import hf_hub_download

from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint


NUM_HIDDEN_LAYERS_MAPPING = {
    "169M": 12,
    "430M": 24,
    "1B5": 24,
    "3B": 32,
    "7B": 32,
    "14B": 40,
}

HIDEN_SIZE_MAPPING = {
    "169M": 768,
    "430M": 1024,
    "1B5": 2048,
    "3B": 2560,
    "7B": 4096,
    "14B": 5120,
}


def convert_state_dict(state_dict):
    state_dict_keys = list(state_dict.keys())
    for name in state_dict_keys:
        weight = state_dict.pop(name)
        # emb -> embedding
        if name.startswith("emb."):
            name = name.replace("emb.", "embeddings.")
        # ln_0 -> pre_ln (only present at block 0)
        if name.startswith("blocks.0.ln0"):
            name = name.replace("blocks.0.ln0", "blocks.0.pre_ln")
        # att -> attention
        name = re.sub(r"blocks\.(\d+)\.att", r"blocks.\1.attention", name)
        # ffn -> feed_forward
        name = re.sub(r"blocks\.(\d+)\.ffn", r"blocks.\1.feed_forward", name)
        # time_mix_k -> time_mix_key
        if name.endswith(".time_mix_k"):
            name = name.replace(".time_mix_k", ".time_mix_key")
        # time_mix_v -> time_mix_value
        if name.endswith(".time_mix_v"):
            name = name.replace(".time_mix_v", ".time_mix_value")
        # time_mix_r -> time_mix_receptance
        if name.endswith(".time_mix_r"):
            name = name.replace(".time_mix_r", ".time_mix_receptance")

        if name != "head.weight":
            name = "rwkv." + name

        state_dict[name] = weight
    return state_dict


def convert_rmkv_checkpoint_to_hf_format(
    repo_id, checkpoint_file, output_dir, size=None, tokenizer_file=None, push_to_hub=False, model_name=None
):
    # 1. If possible, build the tokenizer
    if tokenizer_file is None:
        print("No `--tokenizer_file` provided, we will use the default tokenizer.")
        vocab_size = 50277
        tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
    else:
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=tokenizer_file)
        vocab_size = len(tokenizer)
    tokenizer.save_pretrained(output_dir)

    # 2. Build the config
    possible_sizes = list(NUM_HIDDEN_LAYERS_MAPPING.keys())
    if size is None:
        # Try to infer size from the checkpoint name
        for candidate in possible_sizes:
            if candidate in checkpoint_file:
                size = candidate
                break
        if size is None:
            raise ValueError("Could not infer the size, please provide it with the `--size` argument.")
    if size not in possible_sizes:
        raise ValueError(f"`size` should be one of {possible_sizes}, got {size}.")

    config = RwkvConfig(
        vocab_size=vocab_size,
        num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size],
        hidden_size=HIDEN_SIZE_MAPPING[size],
    )
    config.save_pretrained(output_dir)

    # 3. Download model file then convert state_dict
    model_file = hf_hub_download(repo_id, checkpoint_file)
    state_dict = torch.load(model_file, map_location="cpu")
    state_dict = convert_state_dict(state_dict)

    # 4. Split in shards and save
    shards, index = shard_checkpoint(state_dict)
    for shard_file, shard in shards.items():
        torch.save(shard, os.path.join(output_dir, shard_file))

    if index is not None:
        save_index_file = os.path.join(output_dir, WEIGHTS_INDEX_NAME)
        # Save the index as well
        with open(save_index_file, "w", encoding="utf-8") as f:
            content = json.dumps(index, indent=2, sort_keys=True) + "\n"
            f.write(content)

    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        "Cleaning up shards. This may error with an OOM error, if this is the case don't worry you still have converted the model."
    )
    shard_files = list(shards.keys())

    del state_dict
    del shards
    gc.collect()

    for shard_file in shard_files:
        state_dict = torch.load(os.path.join(output_dir, shard_file))
        torch.save({k: v.cpu().clone() for k, v in state_dict.items()}, os.path.join(output_dir, shard_file))

    del state_dict
    gc.collect()

    if push_to_hub:
        if model_name is None:
            raise ValueError("Please provide a `model_name` to push the model to the Hub.")
        model = AutoModelForCausalLM.from_pretrained(output_dir)
        model.push_to_hub(model_name, max_shard_size="2GB")
        tokenizer.push_to_hub(model_name)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
    )
    parser.add_argument(
        "--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
    )
    parser.add_argument(
        "--tokenizer_file",
        default=None,
        type=str,
        help="Path to the tokenizer file to use (if not provided, only the model is converted).",
    )
    parser.add_argument(
        "--size",
        default=None,
        type=str,
        help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
    )
    parser.add_argument(
        "--push_to_hub",
        action="store_true",
        help="Push to the Hub the converted model.",
    )
    parser.add_argument(
        "--model_name",
        default=None,
        type=str,
        help="Name of the pushed model on the Hub, including the username / organization.",
    )

    args = parser.parse_args()
    convert_rmkv_checkpoint_to_hf_format(
        args.repo_id,
        args.checkpoint_file,
        args.output_dir,
        size=args.size,
        tokenizer_file=args.tokenizer_file,
        push_to_hub=args.push_to_hub,
        model_name=args.model_name,
    )
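An illustrative invocation (the repo and checkpoint-file names are examples, not guaranteed to exist; the flags are exactly the ones registered above):

# python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#     --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth --output_dir ./rwkv-4-169m --size 169M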
| 302 |
from dataclasses import dataclass
from typing import List, Optional, Union

import numpy as np
import torch

from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available


@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_text_to_video_synth import TextToVideoSDPipeline
    from .pipeline_text_to_video_synth_img2img import VideoToVideoSDPipeline  # noqa: F401
    from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
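A minimal usage sketch, assuming the public damo-vilab checkpoint and default generation settings (not part of this `__init__`):

# pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
# frames = pipe("a panda surfing").frames  # `frames` matches the output field declared above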
| 302 | 1 |
"""simple docstring"""
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: Optional[int] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Dict = 1
_snake_case : Optional[int] = 3
_snake_case : str = (32, 32)
_snake_case : Optional[int] = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0 ) ).to(a_ )
return image
@property
def UpperCamelCase_ ( self: List[str] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : str = UNetaDConditionModel(
block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D"""), up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D"""), cross_attention_dim=32, )
return model
@property
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""], up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""], latent_channels=4, )
return model
@property
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
_snake_case : List[Any] = CLIPTextConfig(
bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1E-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1_000, )
return CLIPTextModel(a_ )
@property
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
def extract(*a_: List[Any], **a_: Optional[int] ):
class lowercase:
'''simple docstring'''
def __init__( self: Dict ):
'''simple docstring'''
_snake_case : Any = torch.ones([0] )
def UpperCamelCase_ ( self: Any, a_: List[Any] ):
'''simple docstring'''
self.pixel_values.to(a_ )
return self
return Out()
return extract
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : int = self.dummy_cond_unet
_snake_case : Union[str, Any] = DDIMScheduler(
beta_start=0.00_085, beta_end=0.012, beta_schedule="""scaled_linear""", clip_sample=a_, set_alpha_to_one=a_, )
_snake_case : List[Any] = self.dummy_vae
_snake_case : int = self.dummy_text_encoder
_snake_case : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_snake_case : str = StableDiffusionPipeline(
unet=a_, scheduler=a_, vae=a_, text_encoder=a_, tokenizer=a_, safety_checker=a_, feature_extractor=self.dummy_extractor, )
_snake_case : Dict = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_snake_case : List[str] = 'A painting of a squirrel eating a burger'
_snake_case : str = torch.Generator(device=a_ ).manual_seed(0 )
_snake_case : Dict = sd_pipe([prompt], generator=a_, guidance_scale=6.0, num_inference_steps=2, output_type="""np""" )
_snake_case : List[str] = output.images
_snake_case : Optional[Any] = torch.Generator(device=a_ ).manual_seed(0 )
_snake_case : Union[str, Any] = sd_pipe(
[prompt], generator=a_, guidance_scale=6.0, num_inference_steps=2, output_type="""np""", return_dict=a_, )[0]
_snake_case : str = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : str = np.array([0.5_756, 0.6_118, 0.5_005, 0.5_041, 0.5_471, 0.4_726, 0.4_976, 0.4_865, 0.4_864] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : List[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_snake_case : List[Any] = self.dummy_cond_unet
_snake_case : Tuple = PNDMScheduler(skip_prk_steps=a_ )
_snake_case : List[str] = self.dummy_vae
_snake_case : List[str] = self.dummy_text_encoder
_snake_case : Union[str, Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# make sure here that pndm scheduler skips prk
_snake_case : str = StableDiffusionPipeline(
unet=a_, scheduler=a_, vae=a_, text_encoder=a_, tokenizer=a_, safety_checker=a_, feature_extractor=self.dummy_extractor, )
_snake_case : Optional[int] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_snake_case : Tuple = 'A painting of a squirrel eating a burger'
_snake_case : int = torch.Generator(device=a_ ).manual_seed(0 )
_snake_case : List[Any] = sd_pipe([prompt], generator=a_, guidance_scale=6.0, num_inference_steps=2, output_type="""np""" )
_snake_case : Union[str, Any] = output.images
_snake_case : str = torch.Generator(device=a_ ).manual_seed(0 )
_snake_case : Any = sd_pipe(
[prompt], generator=a_, guidance_scale=6.0, num_inference_steps=2, output_type="""np""", return_dict=a_, )[0]
_snake_case : List[Any] = image[0, -3:, -3:, -1]
_snake_case : List[str] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_snake_case : str = np.array([0.5_125, 0.5_716, 0.4_828, 0.5_060, 0.5_650, 0.4_768, 0.5_185, 0.4_895, 0.4_993] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self: Tuple ):
'''simple docstring'''
_snake_case : Union[str, Any] = StableDiffusionPipeline.from_pretrained(
"""hf-internal-testing/tiny-stable-diffusion-lms-pipe""", safety_checker=a_ )
assert isinstance(a_, a_ )
assert isinstance(pipe.scheduler, a_ )
assert pipe.safety_checker is None
_snake_case : Any = pipe("""example prompt""", num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(a_ )
_snake_case : Dict = StableDiffusionPipeline.from_pretrained(a_ )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
_snake_case : Tuple = pipe("""example prompt""", num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != """cuda""", """This test requires a GPU""" )
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : int = self.dummy_cond_unet
_snake_case : Optional[int] = PNDMScheduler(skip_prk_steps=a_ )
_snake_case : Any = self.dummy_vae
_snake_case : Union[str, Any] = self.dummy_text_encoder
_snake_case : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
# put models in fp16
_snake_case : Optional[Any] = unet.half()
_snake_case : Any = vae.half()
_snake_case : Tuple = bert.half()
# make sure here that pndm scheduler skips prk
_snake_case : Tuple = StableDiffusionPipeline(
unet=a_, scheduler=a_, vae=a_, text_encoder=a_, tokenizer=a_, safety_checker=a_, feature_extractor=self.dummy_extractor, )
_snake_case : Optional[Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_snake_case : Any = 'A painting of a squirrel eating a burger'
_snake_case : List[Any] = sd_pipe([prompt], num_inference_steps=2, output_type="""np""" ).images
assert image.shape == (1, 64, 64, 3)
@nightly
@require_torch_gpu
class lowercase( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase_ ( self: str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase_ ( self: List[Any] ):
'''simple docstring'''
_snake_case : Dict = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""", safety_checker=a_ )
_snake_case : Optional[Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Any = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_snake_case : Tuple = (
'portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle'
' coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with'
' anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and'
' children from bahnhof zoo, detailed '
)
_snake_case : Dict = 4_003_660_346
_snake_case : str = 7
# without safety guidance (sld_guidance_scale = 0)
_snake_case : Dict = torch.manual_seed(a_ )
_snake_case : List[Any] = sd_pipe(
[prompt], generator=a_, guidance_scale=a_, num_inference_steps=50, output_type="""np""", width=512, height=512, sld_guidance_scale=0, )
_snake_case : int = output.images
_snake_case : Optional[Any] = image[0, -3:, -3:, -1]
_snake_case : List[str] = [0.2_278, 0.2_231, 0.2_249, 0.2_333, 0.2_303, 0.1_885, 0.2_273, 0.2_144, 0.2_176]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
# with safety guidance enabled (strong configuration)
_snake_case : str = torch.manual_seed(a_ )
_snake_case : Optional[int] = sd_pipe(
[prompt], generator=a_, guidance_scale=a_, num_inference_steps=50, output_type="""np""", width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
_snake_case : Dict = output.images
_snake_case : Tuple = image[0, -3:, -3:, -1]
_snake_case : List[str] = [0.2_383, 0.2_276, 0.236, 0.2_192, 0.2_186, 0.2_053, 0.1_971, 0.1_901, 0.1_719]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self: int ):
'''simple docstring'''
_snake_case : List[Any] = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""", safety_checker=a_ )
_snake_case : Dict = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
_snake_case : Dict = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_snake_case : int = 'padme amidala taking a bath artwork, safe for work, no nudity'
_snake_case : Tuple = 2_734_971_755
_snake_case : Tuple = 7
_snake_case : Optional[int] = torch.manual_seed(a_ )
_snake_case : Tuple = sd_pipe(
[prompt], generator=a_, guidance_scale=a_, num_inference_steps=50, output_type="""np""", width=512, height=512, sld_guidance_scale=0, )
_snake_case : int = output.images
_snake_case : Tuple = image[0, -3:, -3:, -1]
_snake_case : Any = [0.3_502, 0.3_622, 0.3_396, 0.3_642, 0.3_478, 0.3_318, 0.35, 0.3_348, 0.3_297]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
_snake_case : Tuple = torch.manual_seed(a_ )
_snake_case : Any = sd_pipe(
[prompt], generator=a_, guidance_scale=a_, num_inference_steps=50, output_type="""np""", width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
_snake_case : Union[str, Any] = output.images
_snake_case : Dict = image[0, -3:, -3:, -1]
_snake_case : Optional[int] = [0.5_531, 0.5_206, 0.4_895, 0.5_156, 0.5_182, 0.4_751, 0.4_802, 0.4_803, 0.4_443]
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def UpperCamelCase_ ( self: Any ):
'''simple docstring'''
_snake_case : Tuple = StableDiffusionPipeline.from_pretrained("""runwayml/stable-diffusion-v1-5""" )
_snake_case : Union[str, Any] = sd_pipe.to(a_ )
sd_pipe.set_progress_bar_config(disable=a_ )
_snake_case : Optional[Any] = (
'the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c.'
' leyendecker'
)
_snake_case : int = 1_044_355_234
_snake_case : Any = 12
_snake_case : List[Any] = torch.manual_seed(a_ )
_snake_case : Tuple = sd_pipe(
[prompt], generator=a_, guidance_scale=a_, num_inference_steps=50, output_type="""np""", width=512, height=512, sld_guidance_scale=0, )
_snake_case : Union[str, Any] = output.images
_snake_case : List[str] = image[0, -3:, -3:, -1]
_snake_case : Union[str, Any] = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-7
_snake_case : Union[str, Any] = torch.manual_seed(a_ )
_snake_case : Dict = sd_pipe(
[prompt], generator=a_, guidance_scale=a_, num_inference_steps=50, output_type="""np""", width=512, height=512, sld_guidance_scale=2_000, sld_warmup_steps=7, sld_threshold=0.025, sld_momentum_scale=0.5, sld_mom_beta=0.7, )
_snake_case : str = output.images
_snake_case : List[Any] = image[0, -3:, -3:, -1]
_snake_case : int = np.array([0.5_818, 0.6_285, 0.6_835, 0.6_019, 0.625, 0.6_754, 0.6_096, 0.6_334, 0.6_561] )
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
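# The sld_* arguments exercised above come from safe latent diffusion, which
# subtracts a "safety" direction from the classifier-free guidance term and
# smooths it with a running momentum. The sketch below only illustrates that
# momentum bookkeeping (sld_momentum_scale / sld_mom_beta); the real
# pipeline's masking and scaling differ, so treat every formula here as an
# assumption rather than the library's implementation.
import numpy as np

def apply_safety_momentum(guidance, safety_term, momentum, mom_scale=0.5, mom_beta=0.7):
    # Blend the accumulated momentum into this step's raw safety correction,
    # then update the running momentum as an exponential moving average.
    safety = safety_term + mom_scale * momentum
    momentum = mom_beta * momentum + (1.0 - mom_beta) * safety
    # The safety correction is subtracted from the guidance direction.
    return guidance - safety, momentum

_rng = np.random.default_rng(0)
_g, _m = _rng.standard_normal(4), np.zeros(4)
for _ in range(3):  # pretend three denoising steps past sld_warmup_steps
    _g, _m = apply_safety_momentum(_g, 0.1 * _rng.standard_normal(4), _m)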
| 362 |
"""simple docstring"""
import torch
from torch import nn
class lowercase( nn.Module ):
'''simple docstring'''
def __init__( self: Any, a_: List[str], a_: Union[str, Any], a_: int, a_: int, a_: List[Any]=1, a_: Union[str, Any]=False ):
'''simple docstring'''
super().__init__()
_snake_case : int = n_token
_snake_case : Tuple = d_embed
_snake_case : List[str] = d_proj
_snake_case : Optional[int] = cutoffs + [n_token]
_snake_case : Any = [0] + self.cutoffs
_snake_case : Tuple = div_val
_snake_case : Optional[int] = self.cutoffs[0]
_snake_case : Union[str, Any] = len(self.cutoffs ) - 1
_snake_case : Union[str, Any] = self.shortlist_size + self.n_clusters
if self.n_clusters > 0:
_snake_case : List[Any] = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed ) )
_snake_case : Tuple = nn.Parameter(torch.zeros(self.n_clusters ) )
_snake_case : Any = nn.ModuleList()
_snake_case : Optional[Any] = nn.ParameterList()
if div_val == 1:
for i in range(len(self.cutoffs ) ):
if d_proj != d_embed:
self.out_projs.append(nn.Parameter(torch.FloatTensor(a_, a_ ) ) )
else:
self.out_projs.append(a_ )
self.out_layers.append(nn.Linear(a_, a_ ) )
else:
for i in range(len(self.cutoffs ) ):
_snake_case , _snake_case : List[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case : Union[str, Any] = d_embed // (div_val**i)
self.out_projs.append(nn.Parameter(torch.FloatTensor(a_, a_ ) ) )
self.out_layers.append(nn.Linear(a_, r_idx - l_idx ) )
_snake_case : Optional[int] = keep_order
def UpperCamelCase_ ( self: str, a_: Union[str, Any], a_: Dict, a_: int, a_: Tuple ):
'''simple docstring'''
if proj is None:
_snake_case : List[Any] = nn.functional.linear(a_, a_, bias=a_ )
else:
# if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
_snake_case : List[Any] = nn.functional.linear(a_, proj.t().contiguous() )
_snake_case : Tuple = nn.functional.linear(a_, a_, bias=a_ )
# else:
# logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
# if bias is not None:
# logit = logit + bias
return logit
def UpperCamelCase_ ( self: Dict, a_: Dict, a_: str=None, a_: Union[str, Any]=False ):
'''simple docstring'''
if labels is not None:
# Shift so that tokens < n predict n
_snake_case : int = hidden[..., :-1, :].contiguous()
_snake_case : List[Any] = labels[..., 1:].contiguous()
_snake_case : str = hidden.view(-1, hidden.size(-1 ) )
_snake_case : Dict = labels.view(-1 )
if hidden.size(0 ) != labels.size(0 ):
raise RuntimeError("""Input and labels should have the same size in the batch dimension.""" )
else:
_snake_case : int = hidden.view(-1, hidden.size(-1 ) )
if self.n_clusters == 0:
_snake_case : Tuple = self._compute_logit(a_, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] )
if labels is not None:
_snake_case : Dict = labels != -100
_snake_case : str = torch.zeros_like(a_, dtype=hidden.dtype, device=hidden.device )
_snake_case : str = (
-nn.functional.log_softmax(a_, dim=-1 )[mask].gather(1, labels[mask].unsqueeze(1 ) ).squeeze(1 )
)
else:
_snake_case : Optional[int] = nn.functional.log_softmax(a_, dim=-1 )
else:
# construct weights and biases
_snake_case , _snake_case : Optional[int] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_snake_case , _snake_case : Optional[int] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case : List[Any] = self.out_layers[0].weight[l_idx:r_idx]
_snake_case : Tuple = self.out_layers[0].bias[l_idx:r_idx]
else:
_snake_case : Optional[int] = self.out_layers[i].weight
_snake_case : int = self.out_layers[i].bias
if i == 0:
_snake_case : List[str] = torch.cat([weight_i, self.cluster_weight], dim=0 )
_snake_case : int = torch.cat([bias_i, self.cluster_bias], dim=0 )
weights.append(a_ )
biases.append(a_ )
_snake_case , _snake_case , _snake_case : Any = weights[0], biases[0], self.out_projs[0]
_snake_case : List[str] = self._compute_logit(a_, a_, a_, a_ )
_snake_case : Union[str, Any] = nn.functional.log_softmax(a_, dim=1 )
if labels is None:
_snake_case : Tuple = hidden.new_empty((head_logit.size(0 ), self.n_token) )
else:
_snake_case : Dict = torch.zeros_like(a_, dtype=hidden.dtype, device=hidden.device )
_snake_case : Union[str, Any] = 0
_snake_case : Optional[Any] = [0] + self.cutoffs
for i in range(len(a_ ) - 1 ):
_snake_case , _snake_case : Dict = cutoff_values[i], cutoff_values[i + 1]
if labels is not None:
_snake_case : Dict = (labels >= l_idx) & (labels < r_idx)
_snake_case : int = mask_i.nonzero().squeeze()
if indices_i.numel() == 0:
continue
_snake_case : List[str] = labels.index_select(0, a_ ) - l_idx
_snake_case : List[str] = head_logprob.index_select(0, a_ )
_snake_case : List[str] = hidden.index_select(0, a_ )
else:
_snake_case : List[str] = hidden
if i == 0:
if labels is not None:
_snake_case : Dict = head_logprob_i.gather(1, target_i[:, None] ).squeeze(1 )
else:
_snake_case : Optional[Any] = head_logprob[:, : self.cutoffs[0]]
else:
_snake_case , _snake_case , _snake_case : Dict = weights[i], biases[i], self.out_projs[i]
_snake_case : int = self._compute_logit(a_, a_, a_, a_ )
_snake_case : int = nn.functional.log_softmax(a_, dim=1 )
_snake_case : Dict = self.cutoffs[0] + i - 1 # No probability for the head cluster
if labels is not None:
_snake_case : Dict = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
1, target_i[:, None] ).squeeze(1 )
else:
_snake_case : Optional[Any] = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
_snake_case : Any = logprob_i
if labels is not None:
if (hasattr(self, """keep_order""" ) and self.keep_order) or keep_order:
out.index_copy_(0, a_, -logprob_i )
else:
out[offset : offset + logprob_i.size(0 )].copy_(-logprob_i )
offset += logprob_i.size(0 )
return out
def UpperCamelCase_ ( self: Union[str, Any], a_: Optional[int] ):
'''simple docstring'''
if self.n_clusters == 0:
_snake_case : Optional[int] = self._compute_logit(a_, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0] )
return nn.functional.log_softmax(a_, dim=-1 )
else:
# construct weights and biases
_snake_case , _snake_case : List[Any] = [], []
for i in range(len(self.cutoffs ) ):
if self.div_val == 1:
_snake_case , _snake_case : Optional[Any] = self.cutoff_ends[i], self.cutoff_ends[i + 1]
_snake_case : Optional[int] = self.out_layers[0].weight[l_idx:r_idx]
_snake_case : str = self.out_layers[0].bias[l_idx:r_idx]
else:
_snake_case : List[Any] = self.out_layers[i].weight
_snake_case : Union[str, Any] = self.out_layers[i].bias
if i == 0:
_snake_case : int = torch.cat([weight_i, self.cluster_weight], dim=0 )
_snake_case : Dict = torch.cat([bias_i, self.cluster_bias], dim=0 )
weights.append(a_ )
biases.append(a_ )
_snake_case , _snake_case , _snake_case : int = weights[0], biases[0], self.out_projs[0]
_snake_case : List[Any] = self._compute_logit(a_, a_, a_, a_ )
_snake_case : List[Any] = hidden.new_empty((head_logit.size(0 ), self.n_token) )
_snake_case : int = nn.functional.log_softmax(a_, dim=1 )
_snake_case : List[Any] = [0] + self.cutoffs
for i in range(len(a_ ) - 1 ):
_snake_case , _snake_case : List[str] = cutoff_values[i], cutoff_values[i + 1]
if i == 0:
_snake_case : List[str] = head_logprob[:, : self.cutoffs[0]]
else:
_snake_case , _snake_case , _snake_case : List[Any] = weights[i], biases[i], self.out_projs[i]
_snake_case : Optional[int] = self._compute_logit(a_, a_, a_, a_ )
_snake_case : Any = nn.functional.log_softmax(a_, dim=1 )
_snake_case : Dict = head_logprob[:, -i] + tail_logprob_i
_snake_case : Any = logprob_i
return out
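# The module above implements an adaptive (clustered) softmax: frequent
# tokens live in a head shortlist, rarer tokens are pushed into tail
# clusters, and log p(token) = log p(cluster | hidden) + log p(token | cluster, hidden).
# A self-contained numeric sketch of that decomposition with toy sizes and
# hypothetical names (not this module's API):
import torch
from torch import nn as _nn

_hidden = torch.randn(1, 8)          # one position, projection size 8
_head = torch.randn(12, 8)           # 10 shortlist tokens + 2 cluster logits
_tail = torch.randn(20, 8)           # 20 tokens inside the first tail cluster
_head_logprob = _nn.functional.log_softmax(_hidden @ _head.t(), dim=-1)
_tail_logprob = _nn.functional.log_softmax(_hidden @ _tail.t(), dim=-1)
# log p(tail token 5) = log p(cluster 0) + log p(token 5 | cluster 0):
_logp_token5 = _head_logprob[0, 10] + _tail_logprob[0, 5]
# Shortlist mass plus cluster-0 mass accounts for everything except cluster 1:
_full = torch.cat([_head_logprob[0, :10], _head_logprob[0, 10] + _tail_logprob[0]])
assert torch.allclose(_full.exp().sum(), 1.0 - _head_logprob[0, 11].exp(), atol=1e-5)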
| 132 | 0 |
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def A (__A : List[str] , __A : Optional[Any]=False ) -> List[Any]:
"""simple docstring"""
try:
UpperCAmelCase_ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
UpperCAmelCase_ = default
else:
# KEY is set, convert it to True or False.
try:
UpperCAmelCase_ = strtobool(__A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
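# A hedged usage sketch for the env-flag parser above (the module itself
# calls it parse_flag_from_env just below):
#
#   os.environ["RUN_SLOW"] = "yes"
#   parse_flag_from_env("RUN_SLOW", default=False)    # -> 1 (strtobool truthy)
#   parse_flag_from_env("UNSET_KEY", default=False)   # -> False (KeyError path)
#
# distutils' strtobool accepts y/yes/t/true/on/1 and their negative
# counterparts; any other string triggers the ValueError re-raised above.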
snake_case_ : Any = parse_flag_from_env("RUN_SLOW", default=False)
def A (__A : Any ) -> Tuple:
"""simple docstring"""
return unittest.skip('''Test was skipped''' )(__A )
def A (__A : int ) -> Dict:
"""simple docstring"""
return unittest.skipUnless(_run_slow_tests , '''test is slow''' )(__A )
def A (__A : Dict ) -> int:
"""simple docstring"""
return unittest.skipUnless(not torch.cuda.is_available() , '''test requires only a CPU''' )(__A )
def A (__A : Dict ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.is_available() , '''test requires a GPU''' )(__A )
def A (__A : Optional[Any] ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_xpu_available() , '''test requires a XPU''' )(__A )
def A (__A : List[Any] ) -> str:
"""simple docstring"""
return unittest.skipUnless(is_mps_available() , '''test requires a `mps` backend support in `torch`''' )(__A )
def A (__A : int ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , '''test requires the Hugging Face suite''' )(__A )
def A (__A : Any ) -> List[Any]:
"""simple docstring"""
return unittest.skipUnless(is_bnb_available() , '''test requires the bitsandbytes library''' )(__A )
def A (__A : List[Any] ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(is_tpu_available() , '''test requires TPU''' )(__A )
def A (__A : List[Any] ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() == 1 , '''test requires a GPU''' )(__A )
def A (__A : List[str] ) -> List[str]:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() == 1 , '''test requires a XPU''' )(__A )
def A (__A : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(torch.cuda.device_count() > 1 , '''test requires multiple GPUs''' )(__A )
def A (__A : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
return unittest.skipUnless(torch.xpu.device_count() > 1 , '''test requires multiple XPUs''' )(__A )
def A (__A : Union[str, Any] ) -> int:
"""simple docstring"""
return unittest.skipUnless(is_safetensors_available() , '''test requires safetensors''' )(__A )
def A (__A : Dict ) -> Optional[int]:
"""simple docstring"""
return unittest.skipUnless(is_deepspeed_available() , '''test requires DeepSpeed''' )(__A )
def A (__A : Union[str, Any] ) -> str:
"""simple docstring"""
return unittest.skipUnless(is_torch_version('''>=''' , '''1.12.0''' ) , '''test requires torch version >= 1.12.0''' )(__A )
def A (__A : Any=None , __A : List[Any]=None ) -> List[str]:
"""simple docstring"""
if test_case is None:
return partial(__A , version=__A )
return unittest.skipUnless(is_torch_version('''>=''' , __A ) , F"""test requires torch version >= {version}""" )(__A )
def A (__A : Union[str, Any] ) -> Any:
"""simple docstring"""
return unittest.skipUnless(is_tensorboard_available() , '''test requires Tensorboard''' )(__A )
def A (__A : Union[str, Any] ) -> str:
"""simple docstring"""
return unittest.skipUnless(is_wandb_available() , '''test requires wandb''' )(__A )
def A (__A : Any ) -> Optional[Any]:
"""simple docstring"""
return unittest.skipUnless(is_comet_ml_available() , '''test requires comet_ml''' )(__A )
snake_case_ : Optional[int] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def A (__A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
return unittest.skipUnless(
_atleast_one_tracker_available , '''test requires at least one tracker to be available and for `comet_ml` to not be installed''' , )(__A )
class __snake_case ( unittest.TestCase ):
UpperCAmelCase__ : List[Any] = True
@classmethod
def lowerCamelCase ( cls : List[Any]):
"""simple docstring"""
UpperCAmelCase_ = tempfile.mkdtemp()
@classmethod
def lowerCamelCase ( cls : Dict):
"""simple docstring"""
if os.path.exists(cls.tmpdir):
shutil.rmtree(cls.tmpdir)
def lowerCamelCase ( self : Tuple):
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir).glob('''**/*'''):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_snake_case)
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[int]):
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class __snake_case ( unittest.TestCase ):
def lowerCamelCase ( self : Optional[Any] , _snake_case : Union[mock.Mock, List[mock.Mock]]):
"""simple docstring"""
UpperCAmelCase_ = mocks if isinstance(_snake_case , (tuple, list)) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop)
def A (__A : Optional[int] ) -> List[str]:
"""simple docstring"""
UpperCAmelCase_ = AcceleratorState()
UpperCAmelCase_ = tensor[None].clone().to(state.device )
UpperCAmelCase_ = gather(__A ).cpu()
UpperCAmelCase_ = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __A ):
return False
return True
class __snake_case :
def __init__( self : List[Any] , _snake_case : Optional[Any] , _snake_case : List[Any] , _snake_case : Union[str, Any]):
"""simple docstring"""
UpperCAmelCase_ = returncode
UpperCAmelCase_ = stdout
UpperCAmelCase_ = stderr
async def A (__A : List[Any] , __A : Union[str, Any] ) -> Tuple:
"""simple docstring"""
while True:
UpperCAmelCase_ = await stream.readline()
if line:
callback(__A )
else:
break
async def A (__A : Optional[int] , __A : Optional[int]=None , __A : Any=None , __A : List[Any]=None , __A : Any=False , __A : Optional[int]=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print('''\nRunning: ''' , ''' '''.join(__A ) )
UpperCAmelCase_ = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__A , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, we will need to switch to the following code. The problem is that no
# data is seen until the process finishes, so if it hangs there is no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
UpperCAmelCase_ = []
UpperCAmelCase_ = []
def tee(__A : Optional[int] , __A : List[Any] , __A : str , __A : Any="" ):
UpperCAmelCase_ = line.decode('''utf-8''' ).rstrip()
sink.append(__A )
if not quiet:
print(__A , __A , file=__A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __A : tee(__A , __A , sys.stdout , label='''stdout:''' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __A : tee(__A , __A , sys.stderr , label='''stderr:''' ) ) ),
] , timeout=__A , )
return _RunOutput(await p.wait() , __A , __A )
def A (__A : Dict , __A : Any=None , __A : Any=None , __A : List[Any]=180 , __A : Optional[int]=False , __A : Any=True ) -> _RunOutput:
"""simple docstring"""
UpperCAmelCase_ = asyncio.get_event_loop()
UpperCAmelCase_ = loop.run_until_complete(
_stream_subprocess(__A , env=__A , stdin=__A , timeout=__A , quiet=__A , echo=__A ) )
UpperCAmelCase_ = ''' '''.join(__A )
if result.returncode > 0:
UpperCAmelCase_ = '''\n'''.join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
return result
class __snake_case ( a ):
pass
def A (__A : List[str] , __A : Tuple=False ) -> Optional[Any]:
"""simple docstring"""
try:
UpperCAmelCase_ = subprocess.check_output(__A , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__A , '''decode''' ):
UpperCAmelCase_ = output.decode('''utf-8''' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
F"""Command `{" ".join(__A )}` failed with the following error:\n\n{e.output.decode()}""" ) from e
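# A minimal, self-contained sketch of the stream-a-subprocess pattern the
# helpers above implement (a hypothetical toy, not this module's API):
import asyncio as _asyncio
import sys as _sys

async def _demo_stream(cmd):
    proc = await _asyncio.create_subprocess_exec(
        *cmd, stdout=_asyncio.subprocess.PIPE, stderr=_asyncio.subprocess.PIPE
    )
    collected = []
    while True:
        line = await proc.stdout.readline()
        if not line:
            break
        # This is the tee() role above: collect the line and echo it live.
        collected.append(line.decode().rstrip())
        print(collected[-1])
    await proc.wait()
    return proc.returncode, collected

if __name__ == "__main__":
    _rc, _out = _asyncio.run(_demo_stream([_sys.executable, "-c", "print('hello')"]))
    assert _rc == 0 and _out == ["hello"]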
| 51 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import TensorType, logging
if TYPE_CHECKING:
from ...onnx.config import PatchingSpec
from ...tokenization_utils_base import PreTrainedTokenizerBase
_lowerCamelCase : List[Any] = logging.get_logger(__name__)
_lowerCamelCase : Union[str, Any] = {
'''allenai/longformer-base-4096''': '''https://huggingface.co/allenai/longformer-base-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096''': '''https://huggingface.co/allenai/longformer-large-4096/resolve/main/config.json''',
'''allenai/longformer-large-4096-finetuned-triviaqa''': (
'''https://huggingface.co/allenai/longformer-large-4096-finetuned-triviaqa/resolve/main/config.json'''
),
'''allenai/longformer-base-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-base-4096-extra.pos.embd.only/resolve/main/config.json'''
),
'''allenai/longformer-large-4096-extra.pos.embd.only''': (
'''https://huggingface.co/allenai/longformer-large-4096-extra.pos.embd.only/resolve/main/config.json'''
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
_UpperCAmelCase : Dict = "longformer"
def __init__( self : Optional[Any] , lowercase : Union[List[int], int] = 512 , lowercase : int = 2 , lowercase : int = 1 , lowercase : int = 0 , lowercase : int = 2 , lowercase : int = 30_522 , lowercase : int = 768 , lowercase : int = 12 , lowercase : int = 12 , lowercase : int = 3_072 , lowercase : str = "gelu" , lowercase : float = 0.1 , lowercase : float = 0.1 , lowercase : int = 512 , lowercase : int = 2 , lowercase : float = 0.02 , lowercase : float = 1E-12 , lowercase : bool = False , **lowercase : Optional[Any] , ):
'''simple docstring'''
super().__init__(pad_token_id=lowercase , **lowercase )
_snake_case = attention_window
_snake_case = sep_token_id
_snake_case = bos_token_id
_snake_case = eos_token_id
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = hidden_act
_snake_case = intermediate_size
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = initializer_range
_snake_case = layer_norm_eps
_snake_case = onnx_export
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase ):
'''simple docstring'''
def __init__( self : int , lowercase : "PretrainedConfig" , lowercase : str = "default" , lowercase : "List[PatchingSpec]" = None ):
'''simple docstring'''
super().__init__(lowercase , lowercase , lowercase )
_snake_case = True
@property
def A ( self : Union[str, Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
_snake_case = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
_snake_case = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('global_attention_mask', dynamic_axis),
] )
@property
def A ( self : int ):
'''simple docstring'''
_snake_case = super().outputs
if self.task == "default":
_snake_case = {0: 'batch'}
return outputs
@property
def A ( self : List[Any] ):
'''simple docstring'''
return 1E-4
@property
def A ( self : List[str] ):
'''simple docstring'''
return max(super().default_onnx_opset , 14 )
def A ( self : str , lowercase : "PreTrainedTokenizerBase" , lowercase : int = -1 , lowercase : int = -1 , lowercase : bool = False , lowercase : Optional[TensorType] = None , ):
'''simple docstring'''
_snake_case = super().generate_dummy_inputs(
preprocessor=lowercase , batch_size=lowercase , seq_length=lowercase , is_pair=lowercase , framework=lowercase )
import torch
# for some reason, replacing this code by inputs["global_attention_mask"] = torch.randint(2, inputs["input_ids"].shape, dtype=torch.int64)
# makes the export fail randomly
_snake_case = torch.zeros_like(inputs['input_ids'] )
# make every second token global
_snake_case = 1
return inputs
| 282 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __A ( unittest.TestCase ):
def __init__( self , a__ , a__=13 , a__=7 , a__=True , a__=True , a__=True , a__=True , a__=99 , a__=32 , a__=5 , a__=4 , a__=37 , a__="gelu" , a__=0.1 , a__=0.1 , a__=512 , a__=16 , a__=2 , a__=0.0_2 , a__=4 , ):
_lowerCAmelCase : Dict = parent
_lowerCAmelCase : str = batch_size
_lowerCAmelCase : Any = seq_length
_lowerCAmelCase : Optional[int] = is_training
_lowerCAmelCase : Any = use_attention_mask
_lowerCAmelCase : Any = use_token_type_ids
_lowerCAmelCase : Union[str, Any] = use_labels
_lowerCAmelCase : List[Any] = vocab_size
_lowerCAmelCase : str = hidden_size
_lowerCAmelCase : List[Any] = num_hidden_layers
_lowerCAmelCase : Tuple = num_attention_heads
_lowerCAmelCase : List[str] = intermediate_size
_lowerCAmelCase : Any = hidden_act
_lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
_lowerCAmelCase : List[str] = attention_probs_dropout_prob
_lowerCAmelCase : Dict = max_position_embeddings
_lowerCAmelCase : List[str] = type_vocab_size
_lowerCAmelCase : Any = type_sequence_label_size
_lowerCAmelCase : Dict = initializer_range
_lowerCAmelCase : List[str] = num_choices
def __A ( self ):
_lowerCAmelCase : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase : Any = None
if self.use_attention_mask:
_lowerCAmelCase : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
_lowerCAmelCase : Tuple = None
if self.use_token_type_ids:
_lowerCAmelCase : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_lowerCAmelCase : List[str] = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=a__ , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def __A ( self ):
_lowerCAmelCase : Dict = self.prepare_config_and_inputs()
_lowerCAmelCase : Optional[Any] = config_and_inputs
_lowerCAmelCase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class __A ( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
_UpperCamelCase : Optional[int] = True
_UpperCamelCase : Optional[int] = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def __A ( self ):
_lowerCAmelCase : Any = FlaxRoFormerModelTester(self )
@slow
def __A ( self ):
for model_class_name in self.all_model_classes:
_lowerCAmelCase : List[Any] = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" , from_pt=a__ )
_lowerCAmelCase : Tuple = model(np.ones((1, 1) ) )
self.assertIsNotNone(a__ )
@require_flax
class __A ( unittest.TestCase ):
@slow
def __A ( self ):
_lowerCAmelCase : Optional[int] = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
_lowerCAmelCase : Dict = jnp.array([[0, 1, 2, 3, 4, 5]] )
_lowerCAmelCase : Union[str, Any] = model(a__ )[0]
_lowerCAmelCase : Tuple = 50000
_lowerCAmelCase : int = (1, 6, vocab_size)
self.assertEqual(output.shape , a__ )
_lowerCAmelCase : List[Any] = jnp.array(
[[[-0.1_2_0_5, -1.0_2_6_5, 0.2_9_2_2], [-1.5_1_3_4, 0.1_9_7_4, 0.1_5_1_9], [-5.0_1_3_5, -3.9_0_0_3, -0.8_4_0_4]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] , a__ , atol=1e-4 ) )
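# RoFormer's defining trick is the rotary position embedding checked
# implicitly above: query/key channel pairs are rotated by position-dependent
# angles, so attention scores depend only on relative offsets. A minimal
# numpy sketch of that property (illustrative, not the model code):
import numpy as np

def _rotate(x, pos, theta=10000.0):
    # Rotate consecutive channel pairs of x by pos * (per-pair frequency).
    d = x.shape[-1]
    freqs = theta ** (-np.arange(0, d, 2) / d)
    ang = pos * freqs
    cos, sin = np.cos(ang), np.sin(ang)
    x1, x2 = x[0::2], x[1::2]
    out = np.empty_like(x)
    out[0::2] = x1 * cos - x2 * sin
    out[1::2] = x1 * sin + x2 * cos
    return out

_q, _k = np.random.default_rng(0).standard_normal((2, 8))
# <rotate(q, m), rotate(k, n)> depends only on n - m (2 in both cases here):
assert np.isclose(_rotate(_q, 3) @ _rotate(_k, 5), _rotate(_q, 10) @ _rotate(_k, 12))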
| 359 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class __A ( unittest.TestCase ):
def __init__( self , a__ , a__=7 , a__=3 , a__=30 , a__=400 , a__=True , a__=None , a__=0.9 , a__=None , a__=True , a__=[0.5, 0.5, 0.5] , a__=[0.5, 0.5, 0.5] , ):
_lowerCAmelCase : int = size if size is not None else {"""shortest_edge""": 30}
_lowerCAmelCase : Dict = crop_size if crop_size is not None else {"""height""": 30, """width""": 30}
_lowerCAmelCase : List[Any] = parent
_lowerCAmelCase : Optional[int] = batch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : str = min_resolution
_lowerCAmelCase : Dict = max_resolution
_lowerCAmelCase : str = do_resize_and_center_crop
_lowerCAmelCase : List[str] = size
_lowerCAmelCase : int = crop_pct
_lowerCAmelCase : int = crop_size
_lowerCAmelCase : Union[str, Any] = do_normalize
_lowerCAmelCase : Tuple = image_mean
_lowerCAmelCase : Optional[Any] = image_std
def __A ( self ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class __A ( SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
_UpperCamelCase : List[str] = PoolFormerImageProcessor if is_vision_available() else None
def __A ( self ):
_lowerCAmelCase : str = PoolFormerImageProcessingTester(self )
@property
def __A ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __A ( self ):
_lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a__ , """do_resize_and_center_crop""" ) )
self.assertTrue(hasattr(a__ , """size""" ) )
self.assertTrue(hasattr(a__ , """crop_pct""" ) )
self.assertTrue(hasattr(a__ , """do_normalize""" ) )
self.assertTrue(hasattr(a__ , """image_mean""" ) )
self.assertTrue(hasattr(a__ , """image_std""" ) )
def __A ( self ):
_lowerCAmelCase : List[str] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 30} )
self.assertEqual(image_processor.crop_size , {"""height""": 30, """width""": 30} )
_lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def __A ( self ):
pass
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowerCAmelCase : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , Image.Image )
# Test not batched input
_lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase : List[str] = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowerCAmelCase : Union[str, Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , numpify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , np.ndarray )
# Test not batched input
_lowerCAmelCase : Dict = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase : int = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def __A ( self ):
# Initialize image_processing
_lowerCAmelCase : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowerCAmelCase : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a__ , torchify=a__ )
for image in image_inputs:
self.assertIsInstance(a__ , torch.Tensor )
# Test not batched input
_lowerCAmelCase : str = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_lowerCAmelCase : List[str] = image_processing(a__ , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
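# The processor above resizes with a crop percentage and then center-crops:
# the shortest edge is first scaled to crop_size / crop_pct and the final
# crop_size square is cut from the middle. A small arithmetic sketch of the
# resulting shapes (assumed behaviour mirroring the timm crop_pct convention,
# not this class's code):
def _resize_then_crop_shape(height, width, crop_size=30, crop_pct=0.9):
    scale_size = int(crop_size / crop_pct)        # e.g. int(30 / 0.9) == 33
    short, long = sorted((height, width))
    new_short = scale_size
    new_long = int(long * scale_size / short)     # keep the aspect ratio
    resized = (new_short, new_long) if height <= width else (new_long, new_short)
    return resized, (crop_size, crop_size)

assert _resize_then_crop_shape(60, 80) == ((33, 44), (30, 30))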
| 126 | 0 |
a =[sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100000)]
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> int:
__lowerCamelCase : Optional[int] = 0
while number:
# Slightly faster: consume five digits at a time via the precomputed table.
sum_of_digits_squared += DIGITS_SQUARED[number % 1_0_0_0_0_0]
number //= 1_0_0_0_0_0
return sum_of_digits_squared
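# Two worked chains for the digit-square map defined above; every starting
# value eventually falls into one of them (the comments below rely on this):
#   44 -> 32 -> 13 -> 10 -> 1 -> 1 -> ...                       settles at 1
#   85 -> 89 -> 145 -> 42 -> 20 -> 4 -> 16 -> 37 -> 58 -> 89    loops back to 89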
# Every chain eventually falls into one of the two cycles worked out above:
# one loops at 89 (seeding its member 58 first minimises the number of
# iterations needed to classify all remaining members), and the other ends at
# 1, a cycle containing only the single element 1.
# So 58 and 1 are the values marked at the start.
# A dictionary was replaced with a flat array to speed up the solution.
a =[None] * 10000000
a =True
a =False
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ ) -> bool:
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
__lowerCamelCase : Dict = chain(next_number(lowerCamelCase__ ) )
__lowerCamelCase : Optional[int] = number_chain
while number < 1_0_0_0_0_0_0_0:
__lowerCamelCase : List[Any] = number_chain
number *= 1_0
return number_chain
def SCREAMING_SNAKE_CASE__ ( lowerCamelCase__ = 1_0_0_0_0_0_0_0 ) -> int:
for i in range(1 , lowerCamelCase__ ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(lowerCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F"""{solution() = }""")
| 73 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( SCREAMING_SNAKE_CASE ):
_UpperCAmelCase : Optional[Any] = ['''image_processor''', '''tokenizer''']
_UpperCAmelCase : Union[str, Any] = '''Pix2StructImageProcessor'''
_UpperCAmelCase : Any = ('''T5Tokenizer''', '''T5TokenizerFast''')
def __init__( self : List[str] ,SCREAMING_SNAKE_CASE__ : str ,SCREAMING_SNAKE_CASE__ : int):
__lowerCamelCase : List[Any] = False
super().__init__(SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__)
def __call__( self : str ,SCREAMING_SNAKE_CASE__ : Any=None ,SCREAMING_SNAKE_CASE__ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Union[bool, str, PaddingStrategy] = False ,SCREAMING_SNAKE_CASE__ : Union[bool, str, TruncationStrategy] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[int] = 2_0_4_8 ,SCREAMING_SNAKE_CASE__ : int = 0 ,SCREAMING_SNAKE_CASE__ : Optional[int] = None ,SCREAMING_SNAKE_CASE__ : Optional[bool] = None ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = False ,SCREAMING_SNAKE_CASE__ : bool = True ,SCREAMING_SNAKE_CASE__ : Optional[Union[str, TensorType]] = None ,**SCREAMING_SNAKE_CASE__ : Dict ,):
if images is None and text is None:
raise ValueError('You have to specify either images or text.')
# Get only text
if images is None and not self.image_processor.is_vqa:
__lowerCamelCase : Tuple = self.tokenizer
__lowerCamelCase : Dict = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,truncation=SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE__ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE__ ,return_offsets_mapping=SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ ,return_length=SCREAMING_SNAKE_CASE__ ,verbose=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
return text_encoding
if not self.image_processor.is_vqa:
# add pixel_values
__lowerCamelCase : List[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,max_patches=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
else:
# add pixel_values and bbox
__lowerCamelCase : List[Any] = self.image_processor(
SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,max_patches=SCREAMING_SNAKE_CASE__ ,header_text=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
if text is not None and not self.image_processor.is_vqa:
__lowerCamelCase : List[Any] = self.tokenizer(
text=SCREAMING_SNAKE_CASE__ ,add_special_tokens=SCREAMING_SNAKE_CASE__ ,padding=SCREAMING_SNAKE_CASE__ ,truncation=SCREAMING_SNAKE_CASE__ ,max_length=SCREAMING_SNAKE_CASE__ ,stride=SCREAMING_SNAKE_CASE__ ,pad_to_multiple_of=SCREAMING_SNAKE_CASE__ ,return_attention_mask=SCREAMING_SNAKE_CASE__ ,return_overflowing_tokens=SCREAMING_SNAKE_CASE__ ,return_special_tokens_mask=SCREAMING_SNAKE_CASE__ ,return_offsets_mapping=SCREAMING_SNAKE_CASE__ ,return_token_type_ids=SCREAMING_SNAKE_CASE__ ,return_length=SCREAMING_SNAKE_CASE__ ,verbose=SCREAMING_SNAKE_CASE__ ,return_tensors=SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__ ,)
if "attention_mask" in text_encoding:
__lowerCamelCase : List[Any] = text_encoding.pop('attention_mask')
if "input_ids" in text_encoding:
__lowerCamelCase : Dict = text_encoding.pop('input_ids')
else:
__lowerCamelCase : Optional[int] = None
if text_encoding is not None:
encoding_image_processor.update(SCREAMING_SNAKE_CASE__)
return encoding_image_processor
def lowerCAmelCase ( self : Dict ,*SCREAMING_SNAKE_CASE__ : str ,**SCREAMING_SNAKE_CASE__ : int):
return self.tokenizer.batch_decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
def lowerCAmelCase ( self : List[str] ,*SCREAMING_SNAKE_CASE__ : int ,**SCREAMING_SNAKE_CASE__ : Dict):
return self.tokenizer.decode(*SCREAMING_SNAKE_CASE__ ,**SCREAMING_SNAKE_CASE__)
@property
def lowerCAmelCase ( self : int):
__lowerCamelCase : Dict = self.tokenizer.model_input_names
__lowerCamelCase : int = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
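# A hedged usage sketch for the processor above (checkpoint name and output
# keys are assumptions, not read from this file):
#
#   processor = Pix2StructProcessor.from_pretrained("google/pix2struct-base")
#   inputs = processor(images=image, text="What is this?", return_tensors="pt")
#
# For non-VQA checkpoints the image branch yields flattened patches plus an
# attention mask, while the tokenizer outputs popped above are re-keyed as
# decoder inputs before the two dicts are merged.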
| 73 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowercase : Optional[Any] = {"""configuration_speech_encoder_decoder""": ["""SpeechEncoderDecoderConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Union[str, Any] = ["""SpeechEncoderDecoderModel"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase : Any = ["""FlaxSpeechEncoderDecoderModel"""]
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowercase : Tuple = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
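# The _LazyModule indirection above defers the heavy torch/flax imports until
# a symbol is first accessed. A self-contained sketch of the same idea using
# module-level __getattr__ (PEP 562) -- illustrative, not the transformers
# class:
import importlib

_LAZY_ATTRS = {"SpeechEncoderDecoderModel": ".modeling_speech_encoder_decoder"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __package__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")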
| 357 |
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
lowercase : Union[str, Any] = data_utils.TransfoXLTokenizer
lowercase : Optional[int] = data_utils.TransfoXLCorpus
lowercase : List[Any] = data_utils
lowercase : Tuple = data_utils
def A_ ( A__ , A__ , A__ , A__ ) -> Optional[Any]:
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(A__ , 'rb' ) as fp:
a__ : int = pickle.load(A__ , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
a__ : int = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
a__ : List[Any] = corpus.vocab.__dict__
torch.save(A__ , A__ )
a__ : Dict = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , A__ )
a__ : Optional[int] = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(A__ , A__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
a__ : Union[str, Any] = os.path.abspath(A__ )
a__ : Optional[Any] = os.path.abspath(A__ )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
a__ : Dict = TransfoXLConfig()
else:
a__ : Dict = TransfoXLConfig.from_json_file(A__ )
print(F'Building PyTorch model from configuration: {config}' )
a__ : Optional[int] = TransfoXLLMHeadModel(A__ )
a__ : int = load_tf_weights_in_transfo_xl(A__ , A__ , A__ )
# Save pytorch-model
a__ : Any = os.path.join(A__ , A__ )
a__ : Dict = os.path.join(A__ , A__ )
print(F'Save PyTorch model to {os.path.abspath(A__ )}' )
torch.save(model.state_dict() , A__ )
print(F'Save configuration file to {os.path.abspath(A__ )}' )
with open(A__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowercase : List[Any] = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
lowercase : Any = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
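# Hedged invocation sketch (script name and paths are placeholders):
#
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --tf_checkpoint_path ./model.ckpt \
#       --transfo_xl_config_file ./config.json
#
# Passing --transfo_xl_dataset_file instead converts a pickled corpus into
# the vocabulary and dataset caches handled by the first branch above.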
| 225 | 0 |
"""simple docstring"""
import argparse
from transformers import (
TapasConfig,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasTokenizer,
load_tf_weights_in_tapas,
)
from transformers.utils import logging
logging.set_verbosity_info()
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
# Initialise PyTorch model.
# If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of
# TapasConfig to False.
# initialize configuration from json file
UpperCAmelCase_ : str = TapasConfig.from_json_file(__lowerCamelCase )
# set absolute/relative position embeddings parameter
UpperCAmelCase_ : Union[str, Any] = reset_position_index_per_cell
# set remaining parameters of TapasConfig as well as the model based on the task
if task == "SQA":
UpperCAmelCase_ : str = TapasForQuestionAnswering(config=__lowerCamelCase )
elif task == "WTQ":
# run_task_main.py hparams
UpperCAmelCase_ : Union[str, Any] = 4
UpperCAmelCase_ : int = True
# hparam_utils.py hparams
UpperCAmelCase_ : Optional[Any] = 0.66_4694
UpperCAmelCase_ : Tuple = 0.20_7951
UpperCAmelCase_ : Dict = 0.12_1194
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : int = False
UpperCAmelCase_ : str = 0.035_2513
UpperCAmelCase_ : List[Any] = TapasForQuestionAnswering(config=__lowerCamelCase )
elif task == "WIKISQL_SUPERVISED":
# run_task_main.py hparams
UpperCAmelCase_ : List[str] = 4
UpperCAmelCase_ : List[str] = False
# hparam_utils.py hparams
UpperCAmelCase_ : List[Any] = 36.4519
UpperCAmelCase_ : int = 0.90_3421
UpperCAmelCase_ : Union[str, Any] = 222.088
UpperCAmelCase_ : List[str] = True
UpperCAmelCase_ : str = True
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Tuple = 0.76_3141
UpperCAmelCase_ : Dict = TapasForQuestionAnswering(config=__lowerCamelCase )
elif task == "TABFACT":
UpperCAmelCase_ : List[Any] = TapasForSequenceClassification(config=__lowerCamelCase )
elif task == "MLM":
UpperCAmelCase_ : Optional[Any] = TapasForMaskedLM(config=__lowerCamelCase )
elif task == "INTERMEDIATE_PRETRAINING":
UpperCAmelCase_ : int = TapasModel(config=__lowerCamelCase )
else:
raise ValueError(f"""Task {task} not supported.""" )
print(f"""Building PyTorch model from configuration: {config}""" )
# Load weights from tf checkpoint
load_tf_weights_in_tapas(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase )
# Save pytorch-model (weights and configuration)
print(f"""Save PyTorch model to {pytorch_dump_path}""" )
model.save_pretrained(__lowerCamelCase )
# Save tokenizer files
print(f"""Save tokenizer files to {pytorch_dump_path}""" )
UpperCAmelCase_ : Union[str, Any] = TapasTokenizer(vocab_file=tf_checkpoint_path[:-10] + "vocab.txt", model_max_length=512 )
tokenizer.save_pretrained(__lowerCamelCase )
print("Used relative position embeddings:", model.config.reset_position_index_per_cell )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--task', default='SQA', type=str, help='Model task for which to convert a checkpoint. Defaults to SQA.'
)
parser.add_argument(
'--reset_position_index_per_cell',
default=False,
action='store_true',
help='Whether to use relative position embeddings or not. Defaults to False.',
)
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--tapas_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained TAPAS model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
_a = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.task,
args.reset_position_index_per_cell,
args.tf_checkpoint_path,
args.tapas_config_file,
args.pytorch_dump_path,
)
| 61 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : List[Any] = logging.get_logger(__name__)
_a : Any = {
'microsoft/swinv2-tiny-patch4-window8-256': (
'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
),
}
class __A ( SCREAMING_SNAKE_CASE_ ):
_UpperCamelCase : int = "swinv2"
_UpperCamelCase : List[str] = {
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self , a__=224 , a__=4 , a__=3 , a__=96 , a__=[2, 2, 6, 2] , a__=[3, 6, 12, 24] , a__=7 , a__=4.0 , a__=True , a__=0.0 , a__=0.0 , a__=0.1 , a__="gelu" , a__=False , a__=0.0_2 , a__=1e-5 , a__=32 , **a__ , ):
super().__init__(**a__ )
_lowerCAmelCase : int = image_size
_lowerCAmelCase : Optional[Any] = patch_size
_lowerCAmelCase : Any = num_channels
_lowerCAmelCase : List[Any] = embed_dim
_lowerCAmelCase : Optional[int] = depths
_lowerCAmelCase : List[Any] = len(a__ )
_lowerCAmelCase : Any = num_heads
_lowerCAmelCase : Tuple = window_size
_lowerCAmelCase : Tuple = mlp_ratio
_lowerCAmelCase : Any = qkv_bias
_lowerCAmelCase : Optional[int] = hidden_dropout_prob
_lowerCAmelCase : Tuple = attention_probs_dropout_prob
_lowerCAmelCase : str = drop_path_rate
_lowerCAmelCase : List[str] = hidden_act
_lowerCAmelCase : List[str] = use_absolute_embeddings
_lowerCAmelCase : Optional[int] = layer_norm_eps
_lowerCAmelCase : List[Any] = initializer_range
_lowerCAmelCase : Any = encoder_stride
# we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_lowerCAmelCase : Tuple = int(embed_dim * 2 ** (len(a__ ) - 1) )
_lowerCAmelCase : Tuple = (0, 0, 0, 0)
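# Worked example of the hidden_size set above: with the defaults embed_dim=96
# and depths=[2, 2, 6, 2] (four stages), the channel count doubles at each of
# the three stage transitions, so hidden_size = 96 * 2 ** (4 - 1) = 768.
assert int(96 * 2 ** (len([2, 2, 6, 2]) - 1)) == 768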
| 44 | 0 |
'''simple docstring'''
A_ : Tuple = 9.8_0665
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = g )-> float:
'''simple docstring'''
if fluid_density <= 0:
raise ValueError("""Impossible fluid density""" )
if volume < 0:
raise ValueError("""Impossible Object volume""" )
if gravity <= 0:
raise ValueError("""Impossible Gravity""" )
return fluid_density * gravity * volume
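# Worked example for the Archimedes formula above: 0.5 m^3 fully submerged in
# water (rho = 1000 kg/m^3) under standard gravity displaces
#   F_b = 1000 * 9.80665 * 0.5 = 4903.325 N
# (illustrative only; the doctest runner below has no examples to collect)
assert abs(1000 * 9.80665 * 0.5 - 4903.325) < 1e-9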
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 349 |
'''simple docstring'''
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
A_ : Dict = logging.get_logger(__name__)
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )-> None:
'''simple docstring'''
_UpperCAmelCase : Union[str, Any] = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ ), F'''{len(lowerCAmelCase_ )} != {len(lowerCAmelCase_ )}'''
dest_layers.load_state_dict(layers_to_copy.state_dict() )
A_ : Union[str, Any] = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
1_2: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 1_1],
4: [0, 4, 8, 1_1],
6: [0, 2, 4, 7, 9, 1_1],
9: [0, 1, 2, 4, 5, 7, 9, 1_0, 1_1],
1_2: list(range(1_2)),
},
1_6: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 1_5],
3: [0, 8, 1_5],
4: [0, 5, 1_0, 1_5],
6: [0, 3, 6, 9, 1_2, 1_5],
8: [0, 2, 4, 6, 8, 1_0, 1_2, 1_5],
9: [0, 1, 3, 5, 7, 9, 1_1, 1_3, 1_5],
1_2: [0, 1, 2, 3, 4, 5, 6, 7, 9, 1_1, 1_3, 1_5],
1_6: list(range(1_6)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
A_ : int = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
1_2: {1: [1_1], 2: [5, 1_1], 3: [3, 7, 1_1], 6: [1, 3, 5, 8, 1_0, 1_1]},
1_6: {1: [1_5], 4: [4, 9, 1_2, 1_5], 8: [1, 3, 5, 7, 9, 1_1, 1_3, 1_5]},
}
def snake_case_ ( lowerCAmelCase_ , lowerCAmelCase_ )-> Union[str, Any]:
'''simple docstring'''
try:
_UpperCAmelCase : Any = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
F'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
F''' {n_student}''' )
return list(range(lowerCAmelCase_ ) )
def get_layers_to_supervise(n_student, n_teacher) -> List[int]:
    '''simple docstring'''
    if n_student > n_teacher:
        raise ValueError(f"Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}")
    elif n_teacher == n_student:
        return list(range(n_teacher))
    elif n_student == 1:
        return [n_teacher - 1]
    else:
        return LAYERS_TO_SUPERVISE[n_teacher][n_student]
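
# Illustrative lookups straight from the tables above:
# pick_layers_to_copy(n_student=4, n_teacher=12)     -> [0, 4, 8, 11]
# get_layers_to_supervise(n_student=3, n_teacher=12) -> [3, 7, 11]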
def create_student_by_copying_alternating_layers(
    teacher,
    save_path="student",
    e=None,
    d=None,
    copy_first_teacher_layers=False,
    e_layers_to_copy=None,
    d_layers_to_copy=None,
    **extra_config_kwargs,
) -> Tuple[PreTrainedModel, List[int], List[int]]:
    '''simple docstring'''
    _msg = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
    assert (e is not None) or (d is not None), _msg
    if isinstance(teacher, str):
        AutoTokenizer.from_pretrained(teacher).save_pretrained(save_path)  # purely for convenience
        teacher = AutoModelForSeq2SeqLM.from_pretrained(teacher).eval()
    else:
        assert isinstance(teacher, PreTrainedModel), f"teacher must be a model or string got type {type(teacher)}"
    init_kwargs = teacher.config.to_diff_dict()
    try:
        teacher_e, teacher_d = teacher.config.encoder_layers, teacher.config.decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        init_kwargs.update({"encoder_layers": e, "decoder_layers": d})
    except AttributeError:  # T5
        if hasattr(teacher.config, "num_encoder_layers"):
            teacher_e, teacher_d = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
        else:
            teacher_e, teacher_d = teacher.config.num_layers, teacher.config.num_decoder_layers
        if e is None:
            e = teacher_e
        if d is None:
            d = teacher_d
        if hasattr(teacher.config, "num_encoder_layers"):
            init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d})
        else:
            init_kwargs.update({"num_layers": e, "num_decoder_layers": d})
    # Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
    init_kwargs.update(extra_config_kwargs)

    # Copy weights
    student_cfg = teacher.config_class(**init_kwargs)
    student = AutoModelForSeq2SeqLM.from_config(student_cfg)
    # Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
    info = student.load_state_dict(teacher.state_dict(), strict=False)
    assert info.missing_keys == [], info.missing_keys  # every student key should have a teacher key.

    if copy_first_teacher_layers:  # Our copying is done. We just log and save
        e_layers_to_copy, d_layers_to_copy = list(range(e)), list(range(d))
        logger.info(
            f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to"
            f" {save_path}"
        )
        student.save_pretrained(save_path)
        return student, e_layers_to_copy, d_layers_to_copy

    # Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
    if e_layers_to_copy is None:
        e_layers_to_copy: List[int] = pick_layers_to_copy(e, teacher_e)
    if d_layers_to_copy is None:
        d_layers_to_copy: List[int] = pick_layers_to_copy(d, teacher_d)

    try:
        if hasattr(
            teacher, "prophetnet"
        ):  # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
            copy_layers(teacher.prophetnet.encoder.layers, student.prophetnet.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.prophetnet.decoder.layers, student.prophetnet.decoder.layers, d_layers_to_copy)
        else:
            copy_layers(teacher.model.encoder.layers, student.model.encoder.layers, e_layers_to_copy)
            copy_layers(teacher.model.decoder.layers, student.model.decoder.layers, d_layers_to_copy)
    except AttributeError:  # For t5, student.model.encoder.layers is called student.encoder.block
        copy_layers(teacher.encoder.block, student.encoder.block, e_layers_to_copy)
        copy_layers(teacher.decoder.block, student.decoder.block, d_layers_to_copy)
    logger.info(
        f"Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}"
    )
    student.config.init_metadata = {
        "teacher_type": teacher.config.model_type,
        "copied_encoder_layers": e_layers_to_copy,
        "copied_decoder_layers": d_layers_to_copy,
    }
    student.save_pretrained(save_path)
    # Save information about copying for easier reproducibility
    return student, e_layers_to_copy, d_layers_to_copy
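
# Example CLI usage through fire (script filename and teacher checkpoint assumed):
#   python make_student.py facebook/bart-large-cnn student_dir --e 12 --d 3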
if __name__ == "__main__":
    fire.Fire(create_student_by_copying_alternating_layers)
| 349 | 1 |
'''simple docstring'''
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    # Only support the v1 Stable Diffusion VAE layout.
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to output the converted VAE model.")

    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
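
# Example invocation (script filename and both paths are placeholders):
#   python convert_vae_pt_to_diffusers.py --vae_pt_path ./vae.pt --dump_path ./vae_diffusers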
| 139 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Union[str, Any] = ["""flax"""]
def __init__( self : Dict , *a_ : Optional[Any] , **a_ : List[str] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Optional[Any] , *a_ : Union[str, Any] , **a_ : Optional[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : int , *a_ : Union[str, Any] , **a_ : Any ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[Any] = ["""flax"""]
def __init__( self : Dict , *a_ : Optional[Any] , **a_ : Optional[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : str , *a_ : Union[str, Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[Any] , *a_ : Optional[Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Dict = ["""flax"""]
def __init__( self : Any , *a_ : Optional[int] , **a_ : str ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Dict , *a_ : Tuple , **a_ : Dict ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Union[str, Any] , *a_ : Any , **a_ : Union[str, Any] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Optional[Any] = ["""flax"""]
def __init__( self : str , *a_ : Optional[int] , **a_ : Optional[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Dict , *a_ : Dict , **a_ : str ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[Any] , *a_ : Optional[int] , **a_ : List[str] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Optional[Any] = ["""flax"""]
def __init__( self : Optional[Any] , *a_ : Optional[Any] , **a_ : Optional[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : str , *a_ : Optional[Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[str] , *a_ : Union[str, Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[Any] = ["""flax"""]
def __init__( self : Union[str, Any] , *a_ : Dict , **a_ : Any ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Tuple , *a_ : Optional[Any] , **a_ : Tuple ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Optional[int] , *a_ : List[Any] , **a_ : Any ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[str] = ["""flax"""]
def __init__( self : Union[str, Any] , *a_ : str , **a_ : Any ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Optional[Any] , *a_ : Any , **a_ : Tuple ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Optional[Any] , *a_ : Optional[int] , **a_ : str ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : int = ["""flax"""]
def __init__( self : Dict , *a_ : str , **a_ : int ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : str , *a_ : List[Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[Any] , *a_ : List[Any] , **a_ : List[Any] ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Tuple = ["""flax"""]
def __init__( self : Any , *a_ : Any , **a_ : int ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Dict , *a_ : Tuple , **a_ : Optional[int] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[Any] , *a_ : Dict , **a_ : Dict ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Any = ["""flax"""]
def __init__( self : Union[str, Any] , *a_ : Any , **a_ : List[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Dict , *a_ : List[Any] , **a_ : Optional[int] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : int , *a_ : List[Any] , **a_ : Tuple ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Tuple = ["""flax"""]
def __init__( self : Tuple , *a_ : Optional[int] , **a_ : Union[str, Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : int , *a_ : List[str] , **a_ : Optional[Any] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : Union[str, Any] , *a_ : Any , **a_ : Any ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : List[str] = ["""flax"""]
def __init__( self : Optional[Any] , *a_ : Optional[Any] , **a_ : Dict ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[str] , *a_ : int , **a_ : List[str] ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[str] , *a_ : int , **a_ : str ):
requires_backends(cls , ["flax"] )
class __lowerCamelCase ( metaclass=A__ ):
'''simple docstring'''
a_ : Any = ["""flax"""]
def __init__( self : List[str] , *a_ : Optional[Any] , **a_ : List[Any] ):
requires_backends(self , ["flax"] )
@classmethod
def lowerCamelCase ( cls : int , *a_ : Optional[int] , **a_ : Dict ):
requires_backends(cls , ["flax"] )
@classmethod
def lowerCamelCase ( cls : List[str] , *a_ : Union[str, Any] , **a_ : Union[str, Any] ):
requires_backends(cls , ["flax"] )
| 241 | 0 |
import argparse
import intel_extension_for_pytorch as ipex
import torch
from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline
parser = argparse.ArgumentParser("Stable Diffusion script with intel optimization", add_help=False)
parser.add_argument("--dpm", action="store_true", help="Enable DPMSolver or not")
parser.add_argument("--steps", default=None, type=int, help="Num inference steps")
args = parser.parse_args()

device = "cpu"
prompt = "a lovely <dicoo> in red dress and hat, in the snowy and brightly lit night, with many bright buildings"

model_id = "path-to-your-trained-model"
pipe = StableDiffusionPipeline.from_pretrained(model_id)
if args.dpm:
    pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe = pipe.to(device)

# to channels last
pipe.unet = pipe.unet.to(memory_format=torch.channels_last)
pipe.vae = pipe.vae.to(memory_format=torch.channels_last)
pipe.text_encoder = pipe.text_encoder.to(memory_format=torch.channels_last)
if pipe.requires_safety_checker:
    pipe.safety_checker = pipe.safety_checker.to(memory_format=torch.channels_last)

# optimize with ipex
sample = torch.randn(2, 4, 64, 64)
timestep = torch.rand(1) * 999
encoder_hidden_states = torch.randn(2, 77, 768)
input_example = (sample, timestep, encoder_hidden_states)
try:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True, sample_input=input_example)
except Exception:
    pipe.unet = ipex.optimize(pipe.unet.eval(), dtype=torch.bfloat16, inplace=True)
pipe.vae = ipex.optimize(pipe.vae.eval(), dtype=torch.bfloat16, inplace=True)
pipe.text_encoder = ipex.optimize(pipe.text_encoder.eval(), dtype=torch.bfloat16, inplace=True)
if pipe.requires_safety_checker:
    pipe.safety_checker = ipex.optimize(pipe.safety_checker.eval(), dtype=torch.bfloat16, inplace=True)

# compute
seed = 666
generator = torch.Generator(device).manual_seed(seed)
generate_kwargs = {"generator": generator}
if args.steps is not None:
    generate_kwargs["num_inference_steps"] = args.steps

with torch.cpu.amp.autocast(enabled=True, dtype=torch.bfloat16):
    image = pipe(prompt, **generate_kwargs).images[0]

# save image
image.save("generated.png")
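
# Example run (script filename and flag values assumed):
#   python stable_diffusion_inference_bf16.py --dpm --steps 30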
| 216 |
import numpy as np
def runge_kutta(f, xa: float, ya: float, x_end: float, h: float):
    """Classical fourth-order Runge-Kutta integration of y' = f(x, y) from (xa, ya) to x_end with step h."""
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
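
# Worked check (assumed ODE): y' = y with y(0) = 1 has exact solution e^x,
# so the last entry below should be close to e ~= 2.71828.
# ys = runge_kutta(lambda x, y: y, 0.0, 1.0, 1.0, 0.01)
# print(ys[-1])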
if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 216 | 1 |
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 39 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)

        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
        if gpus > 0:
            testargs.append(f"--gpus={gpus}")
            if is_apex_available():
                testargs.append("--fp16")
        else:
            testargs.append("--gpus=0")
            testargs.append("--distributed_backend=ddp_cpu")
            testargs.append("--num_processes=2")

        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
        return result
    @require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_gpu
    @require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)

    @require_torch_multi_gpu
    @require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 39 | 1 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)
def list_field(default=None, metadata=None):
    '''simple docstring'''
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
    '''simple docstring'''

    models: List[str] = list_field(
        default=[],
        metadata={
            "help": (
                "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"
                " of all available models"
            )
        },
    )
    batch_sizes: List[int] = list_field(
        default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"}
    )
    sequence_lengths: List[int] = list_field(
        default=[8, 32, 128, 512],
        metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"},
    )
    inference: bool = field(
        default=True,
        metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."},
    )
    cuda: bool = field(
        default=True,
        metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."},
    )
    tpu: bool = field(
        default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."}
    )
    fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."})
    training: bool = field(default=False, metadata={"help": "Benchmark training of model"})
    verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"})
    speed: bool = field(
        default=True,
        metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."},
    )
    memory: bool = field(
        default=True,
        metadata={
            "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"
        },
    )
    trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"})
    save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"})
    log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"})
    env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"})
    multi_process: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"
                " multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"
                " for debugging / testing and on TPU."
            )
        },
    )
    inference_time_csv_file: str = field(
        default=f"inference_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv."},
    )
    inference_memory_csv_file: str = field(
        default=f"inference_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv."},
    )
    train_time_csv_file: str = field(
        default=f"train_time_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving time results to csv for training."},
    )
    train_memory_csv_file: str = field(
        default=f"train_memory_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving memory results to csv for training."},
    )
    env_info_csv_file: str = field(
        default=f"env_info_{round(time())}.csv",
        metadata={"help": "CSV filename used if saving environment information."},
    )
    log_filename: str = field(
        default=f"log_{round(time())}.csv",
        metadata={"help": "Log filename used if print statements are saved in log."},
    )
    repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."})
    only_pretrain_model: bool = field(
        default=False,
        metadata={
            "help": (
                "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"
                " model weights."
            )
        },
    )
    def __post_init__(self):
        warnings.warn(
            f"The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"
            " are deprecated in general and it is advised to use external Benchmarking libraries "
            " to benchmark Transformer models.",
            FutureWarning,
        )

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(dataclasses.asdict(self), indent=2)

    @property
    def model_names(self) -> List[str]:
        if len(self.models) <= 0:
            raise ValueError(
                "Please make sure you provide at least one model name / model identifier, *e.g.* `--models"
                " bert-base-cased` or `args.models = ['bert-base-cased']."
            )
        return self.models

    @property
    def do_multi_processing(self):
        if not self.multi_process:
            return False
        elif self.is_tpu:
            logger.info("Multiprocessing is currently not possible on TPU.")
            return False
        else:
            return True
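
# Minimal usage sketch (model identifier assumed):
# args = BenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[128])
# print(args.to_json_string())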
| 277 |
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import Seq2SeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class QuestionAnsweringSeq2SeqTrainer(Seq2SeqTrainer):
    def __init__(self, *args, eval_examples=None, post_process_function=None, **kwargs):
        super().__init__(*args, **kwargs)
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function

    def evaluate(
        self,
        eval_dataset: Optional[Dataset] = None,
        eval_examples=None,
        ignore_keys: Optional[List[str]] = None,
        metric_key_prefix: str = "eval",
        **gen_kwargs,
    ) -> Dict[str, float]:
        gen_kwargs = gen_kwargs.copy()
        gen_kwargs["max_length"] = (
            gen_kwargs["max_length"] if gen_kwargs.get("max_length") is not None else self.args.generation_max_length
        )
        gen_kwargs["num_beams"] = (
            gen_kwargs["num_beams"] if gen_kwargs.get("num_beams") is not None else self.args.generation_num_beams
        )
        self._gen_kwargs = gen_kwargs

        eval_dataset = self.eval_dataset if eval_dataset is None else eval_dataset
        eval_dataloader = self.get_eval_dataloader(eval_dataset)
        eval_examples = self.eval_examples if eval_examples is None else eval_examples

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                eval_dataloader,
                description="Evaluation",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
            # Only the main node write the results by default
            eval_preds = self.post_process_function(eval_examples, eval_dataset, output)
            metrics = self.compute_metrics(eval_preds)

            # Prefix all keys with metric_key_prefix + '_'
            for key in list(metrics.keys()):
                if not key.startswith(f"{metric_key_prefix}_"):
                    metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
            metrics.update(output.metrics)
        else:
            metrics = output.metrics

        if self.args.should_log:
            # Only the main node log the results by default
            self.log(metrics)

        if self.args.tpu_metrics_debug or self.args.debug:
            # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
            xm.master_print(met.metrics_report())

        self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
        return metrics

    def predict(
        self, predict_dataset, predict_examples, ignore_keys=None, metric_key_prefix: str = "test", **gen_kwargs
    ):
        self._gen_kwargs = gen_kwargs.copy()

        predict_dataloader = self.get_test_dataloader(predict_dataset)

        # Temporarily disable metric computation, we will do it in the loop here.
        compute_metrics = self.compute_metrics
        self.compute_metrics = None
        start_time = time.time()
        eval_loop = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
        try:
            output = eval_loop(
                predict_dataloader,
                description="Prediction",
                prediction_loss_only=True if compute_metrics is None else None,
                ignore_keys=ignore_keys,
                metric_key_prefix=metric_key_prefix,
            )
        finally:
            self.compute_metrics = compute_metrics
        total_batch_size = self.args.eval_batch_size * self.args.world_size
        if f"{metric_key_prefix}_jit_compilation_time" in output.metrics:
            start_time += output.metrics[f"{metric_key_prefix}_jit_compilation_time"]
        output.metrics.update(
            speed_metrics(
                metric_key_prefix,
                start_time,
                num_samples=output.num_samples,
                num_steps=math.ceil(output.num_samples / total_batch_size),
            )
        )

        if self.post_process_function is None or self.compute_metrics is None:
            return output

        predictions = self.post_process_function(predict_examples, predict_dataset, output, "predict")
        metrics = self.compute_metrics(predictions)

        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        metrics.update(output.metrics)
        return PredictionOutput(predictions=predictions.predictions, label_ids=predictions.label_ids, metrics=metrics)
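
# Intended wiring (model, training_args, datasets, and the post-processing function are
# assumed to be defined elsewhere in the training script):
# trainer = QuestionAnsweringSeq2SeqTrainer(
#     model=model, args=training_args, train_dataset=train_dataset, eval_dataset=eval_dataset,
#     eval_examples=eval_examples, post_process_function=post_processing_function,
# )
# metrics = trainer.evaluate(max_length=64, num_beams=4)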
| 277 | 1 |
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def a_ ( ):
'''simple docstring'''
lowercase__ : Any = ArgumentParser(
description=(
'PyTorch TPU distributed training launch '
'helper utility that will spawn up '
'multiple distributed processes'
) )
# Optional arguments for the launch helper
parser.add_argument('--num_cores' , type=_lowerCAmelCase , default=1 , help='Number of TPU cores to use (1 or 8).' )
# positional
parser.add_argument(
'training_script' , type=_lowerCAmelCase , help=(
'The full path to the single TPU training '
'program/script to be launched in parallel, '
'followed by all the arguments for the '
'training script'
) , )
# rest from the training program
parser.add_argument('training_script_args' , nargs=_lowerCAmelCase )
return parser.parse_args()
def a_ ( ):
'''simple docstring'''
lowercase__ : Union[str, Any] = parse_args()
# Import training_script as a module.
lowercase__ : List[Any] = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
lowercase__ : Dict = script_fpath.stem
lowercase__ : str = importlib.import_module(_lowerCAmelCase )
# Patch sys.argv
lowercase__ : Union[str, Any] = [args.training_script] + args.training_script_args + ['--tpu_num_cores', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
| 77 |
"""simple docstring"""
import argparse
import os
import shutil
import torch
from emmental.modules import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
def main(args):
    '''simple docstring'''
    pruning_method = args.pruning_method
    threshold = args.threshold

    model_name_or_path = args.model_name_or_path.rstrip("/")
    target_model_path = args.target_model_path

    print(f"Load fine-pruned model from {model_name_or_path}")
    model = torch.load(os.path.join(model_name_or_path, "pytorch_model.bin"))
    pruned_model = {}

    for name, tensor in model.items():
        if "embeddings" in name or "LayerNorm" in name or "pooler" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "classifier" in name or "qa_output" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        elif "bias" in name:
            pruned_model[name] = tensor
            print(f"Copied layer {name}")
        else:
            if pruning_method == "magnitude":
                mask = MagnitudeBinarizer.apply(inputs=tensor, threshold=threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "topK":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = TopKBinarizer.apply(scores, threshold)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "sigmoied_threshold":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                mask = ThresholdBinarizer.apply(scores, threshold, True)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            elif pruning_method == "l0":
                if "mask_scores" in name:
                    continue
                prefix_ = name[:-6]
                scores = model[f"{prefix_}mask_scores"]
                l, r = -0.1, 1.1
                s = torch.sigmoid(scores)
                s_bar = s * (r - l) + l
                mask = s_bar.clamp(min=0.0, max=1.0)
                pruned_model[name] = tensor * mask
                print(f"Pruned layer {name}")
            else:
                raise ValueError("Unknown pruning method")

    if target_model_path is None:
        target_model_path = os.path.join(
            os.path.dirname(model_name_or_path), f"bertarized_{os.path.basename(model_name_or_path)}"
        )

    if not os.path.isdir(target_model_path):
        shutil.copytree(model_name_or_path, target_model_path)
        print(f"\nCreated folder {target_model_path}")

    torch.save(pruned_model, os.path.join(target_model_path, "pytorch_model.bin"))
    print("\nPruned model saved! See you later!")
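
# Example invocation (fine-pruned checkpoint folder assumed):
#   python bertarize.py --pruning_method sigmoied_threshold --threshold 0.1 \
#       --model_name_or_path ./serialization_dir/fine_pruned_model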
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--pruning_method",
        choices=["l0", "magnitude", "topK", "sigmoied_threshold"],
        type=str,
        required=True,
        help=(
            "Pruning Method (l0 = L0 regularization, magnitude = Magnitude pruning, topK = Movement pruning,"
            " sigmoied_threshold = Soft movement pruning)"
        ),
    )
    parser.add_argument(
        "--threshold",
        type=float,
        required=False,
        help=(
            "For `magnitude` and `topK`, it is the level of remaining weights (in %) in the fine-pruned model."
            "For `sigmoied_threshold`, it is the threshold \tau against which the (sigmoied) scores are compared."
            "Not needed for `l0`"
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        required=True,
        help="Folder containing the model that was previously fine-pruned",
    )
    parser.add_argument(
        "--target_model_path",
        default=None,
        type=str,
        required=False,
        help="Folder to save the pruned model to (defaults to a `bertarized_` sibling of the input folder)",
    )
    args = parser.parse_args()

    main(args)
| 77 | 1 |
'''simple docstring'''
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    '''simple docstring'''

    @staticmethod
    def _should_log(main_process_only):
        '''simple docstring'''
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        '''simple docstring'''
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)

        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    """Return a MultiProcessAdapter-wrapped logger for the given module name."""
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
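
# Typical use inside an Accelerate script (log level assumed):
# logger = get_logger(__name__, log_level="INFO")
# logger.info("Printed once, from the main process only")
# logger.info("Printed from every process", main_process_only=False)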
| 363 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self : Optional[int] ) -> int:
'''simple docstring'''
A__ : int =tempfile.mkdtemp()
# fmt: off
A__ : Optional[int] =["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
A__ : List[Any] =dict(zip(lowerCAmelCase_ , range(len(lowerCAmelCase_ ) ) ) )
A__ : Tuple =["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
A__ : int ={"""unk_token""": """<unk>"""}
A__ : Optional[Any] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
A__ : int =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCAmelCase_ ) + """\n""" )
with open(self.merges_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCAmelCase_ ) )
A__ : Dict ={
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48145466, 0.4578275, 0.40821073],
"""image_std""": [0.26862954, 0.26130258, 0.27577711],
}
A__ : Dict =os.path.join(self.tmpdirname , lowerCAmelCase_ )
with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] , **lowerCAmelCase_ : Union[str, Any] ) -> int:
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowercase__ ( self : Optional[Any] , **lowerCAmelCase_ : Optional[Any] ) -> Optional[Any]:
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowercase__ ( self : Union[str, Any] , **lowerCAmelCase_ : str ) -> Union[str, Any]:
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname , **lowerCAmelCase_ )
def lowercase__ ( self : Dict ) -> str:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def lowercase__ ( self : List[str] ) -> Optional[Any]:
'''simple docstring'''
A__ : Dict =[np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
A__ : int =[Image.fromarray(np.moveaxis(lowerCAmelCase_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase__ ( self : Dict ) -> Optional[Any]:
'''simple docstring'''
A__ : int =self.get_tokenizer()
A__ : Optional[int] =self.get_rust_tokenizer()
A__ : Any =self.get_image_processor()
A__ : int =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
processor_slow.save_pretrained(self.tmpdirname )
A__ : Dict =CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=lowerCAmelCase_ )
A__ : int =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
processor_fast.save_pretrained(self.tmpdirname )
A__ : Optional[int] =CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , lowerCAmelCase_ )
self.assertIsInstance(processor_fast.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , lowerCAmelCase_ )
self.assertIsInstance(processor_fast.image_processor , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> List[str]:
'''simple docstring'''
A__ : List[str] =CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A__ : List[Any] =self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
A__ : Union[str, Any] =self.get_image_processor(do_normalize=lowerCAmelCase_ , padding_value=1.0 )
A__ : Optional[int] =CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=lowerCAmelCase_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , lowerCAmelCase_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , lowerCAmelCase_ )
def lowercase__ ( self : str ) -> str:
'''simple docstring'''
A__ : Optional[Any] =self.get_image_processor()
A__ : int =self.get_tokenizer()
A__ : Optional[int] =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
A__ : Dict =self.prepare_image_inputs()
A__ : List[Any] =image_processor(lowerCAmelCase_ , return_tensors="""np""" )
A__ : List[Any] =processor(images=lowerCAmelCase_ , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def lowercase__ ( self : Optional[int] ) -> Optional[int]:
'''simple docstring'''
A__ : Any =self.get_image_processor()
A__ : Optional[Any] =self.get_tokenizer()
A__ : int =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
A__ : Any ="""lower newer"""
A__ : Optional[int] =processor(text=lowerCAmelCase_ )
A__ : Optional[int] =tokenizer(lowerCAmelCase_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase__ ( self : List[Any] ) -> List[str]:
'''simple docstring'''
A__ : Any =self.get_image_processor()
A__ : Optional[Any] =self.get_tokenizer()
A__ : Union[str, Any] =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
A__ : Optional[Any] ="""lower newer"""
A__ : List[str] =self.prepare_image_inputs()
A__ : Optional[Any] =processor(text=lowerCAmelCase_ , images=lowerCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def lowercase__ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
A__ : Optional[Any] =self.get_image_processor()
A__ : List[str] =self.get_tokenizer()
A__ : Optional[int] =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
A__ : Tuple =self.prepare_image_inputs()
A__ : str =self.prepare_image_inputs()
A__ : int =processor(images=lowerCAmelCase_ , visual_prompt=lowerCAmelCase_ )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """conditional_pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCAmelCase_ ):
processor()
def lowercase__ ( self : Optional[Any] ) -> str:
'''simple docstring'''
A__ : List[str] =self.get_image_processor()
A__ : Optional[int] =self.get_tokenizer()
A__ : Any =CLIPSegProcessor(tokenizer=lowerCAmelCase_ , image_processor=lowerCAmelCase_ )
A__ : List[Any] =[[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A__ : Tuple =processor.batch_decode(lowerCAmelCase_ )
A__ : List[Any] =tokenizer.batch_decode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_ , lowerCAmelCase_ )
| 136 | 0 |
"""simple docstring"""
from __future__ import annotations
sieve = [True] * 1_000_001
sieve[0] = sieve[1] = False  # 0 and 1 are not prime
i = 2
while i * i <= 1_000_000:
    if sieve[i]:
        for j in range(i * i, 1_000_001, i):
            sieve[j] = False
    i += 1
def is_prime(n: int) -> bool:
    '''simple docstring'''
    return sieve[n]


def contains_an_even_digit(n: int) -> bool:
    '''simple docstring'''
    return any(digit in "02468" for digit in str(n))


def find_circular_primes(limit: int = 1_000_000) -> list[int]:
    '''simple docstring'''
    result = [2]  # result already includes the number 2.
    for num in range(3, limit + 1, 2):
        if is_prime(num) and not contains_an_even_digit(num):
            str_num = str(num)
            list_nums = [int(str_num[j:] + str_num[:j]) for j in range(len(str_num))]
            if all(is_prime(i) for i in list_nums):
                result.append(num)
    return result


def solution() -> int:
    '''simple docstring'''
    return len(find_circular_primes())
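
# Reference point (Project Euler #35): there are 13 circular primes below one hundred:
# 2, 3, 5, 7, 11, 13, 17, 31, 37, 71, 73, 79, 97
# len(find_circular_primes(100)) == 13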
if __name__ == "__main__":
    print(f"{len(find_circular_primes()) = }")
| 84 |
"""simple docstring"""
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    '''simple docstring'''
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith("module.encoder"):
            key = key.replace("module.encoder", "glpn.encoder")
        if key.startswith("module.decoder"):
            key = key.replace("module.decoder", "decoder.stages")
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find("patch_embed") + len("patch_embed")]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace("norm", "layer_norm")
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find("glpn.encoder.layer_norm") + len("glpn.encoder.layer_norm")]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace("layer_norm1", "layer_norm_1")
        if "layer_norm2" in key:
            key = key.replace("layer_norm2", "layer_norm_2")
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find("block") + len("block")]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace("attn.q", "attention.self.query")
        if "attn.proj" in key:
            key = key.replace("attn.proj", "attention.output.dense")
        if "attn" in key:
            key = key.replace("attn", "attention.self")
        if "fc1" in key:
            key = key.replace("fc1", "dense1")
        if "fc2" in key:
            key = key.replace("fc2", "dense2")
        if "linear_pred" in key:
            key = key.replace("linear_pred", "classifier")
        if "linear_fuse" in key:
            key = key.replace("linear_fuse.conv", "linear_fuse")
            key = key.replace("linear_fuse.bn", "batch_norm")
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find("linear_c") + len("linear_c")]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace("bot_conv", "0.convolution")
        if "skip_conv1" in key:
            key = key.replace("skip_conv1", "1.convolution")
        if "skip_conv2" in key:
            key = key.replace("skip_conv2", "2.convolution")
        if "fusion1" in key:
            key = key.replace("fusion1", "1.fusion")
        if "fusion2" in key:
            key = key.replace("fusion2", "2.fusion")
        if "fusion3" in key:
            key = key.replace("fusion3", "3.fusion")
        if "fusion" in key and "conv" in key:
            key = key.replace("conv", "convolutional_layer")
        if key.startswith("module.last_layer_depth"):
            key = key.replace("module.last_layer_depth", "head.head")
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    '''simple docstring'''
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    '''simple docstring'''
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])

    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()

    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors="pt").pixel_values

    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device("cpu"))

    # rename keys
    state_dict = rename_keys(state_dict)

    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)

    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()

    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth

    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]]
            )
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]]
            )
        else:
            raise ValueError(f"Unknown model name: {model_name}")

        expected_shape = torch.Size([1, 480, 640])

        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print("Looks ok!")

    # finally, push to hub if required
    if push_to_hub:
        logger.info("Pushing model and image processor to the hub...")
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add model",
            use_temp_dir=True,
        )
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name),
            organization="nielsr",
            commit_message="Add image processor",
            use_temp_dir=True,
        )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path',
default=None,
type=str,
help='Path to the original PyTorch checkpoint (.pth file).',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether to upload the model to the HuggingFace hub.'
)
parser.add_argument(
'--model_name',
default='glpn-kitti',
type=str,
help='Name of the model in case you\'re pushing to the hub.',
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
| 84 | 1 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}

if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
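

# Illustrative sketch (not part of the module above): what the lazy-module pattern
# buys you — the heavy submodule is only imported on first attribute access.
# "mypackage.heavy_submodule" is a hypothetical module name.
import importlib
import types


class _LazyDemo(types.ModuleType):
    def __getattr__(self, name):
        submodule = importlib.import_module("mypackage.heavy_submodule")
        return getattr(submodule, name)
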
| 358 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        # compute a log-mel spectrogram in dB and rescale it roughly into [-1, 1]
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec
    def __call__(
        self,
        raw_speech,
        return_tensors=None,
        return_attention_mask=True,
        sampling_rate=None,
        resample=False,
        mask_audio=False,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
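

# Illustrative usage sketch (not part of the original module): pad two clips of
# different lengths into one batch. The random waveforms are stand-ins for real
# 44.1 kHz mono audio; the function is defined for demonstration, not called here.
def _demo_feature_extraction() -> None:
    extractor = TvltFeatureExtractor()
    clips = [np.random.randn(44100).astype(np.float32), np.random.randn(22050).astype(np.float32)]
    batch = extractor(clips, sampling_rate=44100, return_tensors="np")
    print(batch["audio_values"].shape)  # (2, 1, max_time_len, 128)
    print(batch["audio_mask"].shape)
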
| 304 | 0 |
'''simple docstring'''
__version__ = "0.18.2"
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
        T5FilmDecoder,
        Transformer2DModel,
        UNet1DModel,
        UNet2DConditionModel,
        UNet2DModel,
        UNet3DConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
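

# Illustrative sketch (not part of the file above): the optional-dependency guard
# pattern used throughout this __init__, reduced to its essentials. "somelib" is a
# hypothetical dependency name.
def _is_somelib_available() -> bool:
    try:
        import somelib  # noqa: F401
    except ImportError:
        return False
    return True


class _MissingSomelibObject:
    # placeholder that fails loudly only when actually used, mirroring the
    # dummy_*_objects modules imported in the except branches above
    def __getattr__(self, name):
        raise ImportError("somelib is required to use this object")
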
| 344 |
def _print_dist(dist, v):
    print("\nThe shortest path matrix using Floyd Warshall algorithm\n")
    for i in range(v):
        for j in range(v):
            if dist[i][j] != float("inf"):
                print(int(dist[i][j]), end="\t")
            else:
                print("INF", end="\t")
        print()


def floyd_warshall(graph, v):
    """
    :param graph: 2D array of edge weights (float("inf") where no edge exists)
    :param v: number of vertices
    :return: shortest distance between all vertex pairs
    """
    dist = [[float("inf") for _ in range(v)] for _ in range(v)]

    for i in range(v):
        for j in range(v):
            dist[i][j] = graph[i][j]

    # check vertex k against all other vertices (i, j)
    for k in range(v):
        # looping through rows of graph array
        for i in range(v):
            # looping through columns of graph array
            for j in range(v):
                if (
                    dist[i][k] != float("inf")
                    and dist[k][j] != float("inf")
                    and dist[i][k] + dist[k][j] < dist[i][j]
                ):
                    dist[i][j] = dist[i][k] + dist[k][j]

    _print_dist(dist, v)
    return dist, v


if __name__ == "__main__":
    v = int(input("Enter number of vertices: "))
    e = int(input("Enter number of edges: "))

    graph = [[float("inf") for i in range(v)] for j in range(v)]

    for i in range(v):
        graph[i][i] = 0.0

    # src and dst are indices that must be within the array size graph[e][v]
    # failure to follow this will result in an error
    for i in range(e):
        print("\nEdge ", i + 1)
        src = int(input("Enter source:"))
        dst = int(input("Enter destination:"))
        weight = float(input("Enter weight:"))
        graph[src][dst] = weight

    floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertex, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
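

# Illustrative sketch (not part of the original script): the same 3-vertex example
# driven programmatically instead of via input(). dist[1][2] stays 2 and dist[2][1]
# stays 1 because no shorter multi-hop path exists. Defined for demonstration only.
def _demo_floyd_warshall() -> None:
    INF = float("inf")
    graph = [
        [0.0, INF, INF],
        [INF, 0.0, 2.0],
        [INF, 1.0, 0.0],
    ]
    dist, _ = floyd_warshall(graph, 3)
    assert dist[1][2] == 2.0
    assert dist[2][1] == 1.0
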
| 344 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}


class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
| 362 |
"""simple docstring"""
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
import numpy as np
# Parameters
OUTPUT_SIZE = (720, 1280)  # Height, Width
SCALE_RANGE = (0.4, 0.6)  # if height or width lower than this scale, drop it.
FILTER_TINY_SCALE = 1 / 100
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
NUMBER_IMAGES = 250
def main() -> None:
    """Read the dataset, build mosaic images and write images plus YOLO labels."""
    img_paths, annos = get_dataset(LABEL_DIR, IMG_DIR)
    for index in range(NUMBER_IMAGES):
        idxs = random.sample(range(len(annos)), 4)
        new_image, new_annos, path = update_image_and_anno(
            img_paths,
            annos,
            idxs,
            OUTPUT_SIZE,
            SCALE_RANGE,
            filter_scale=FILTER_TINY_SCALE,
        )

        # Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
        letter_code = random_chars(32)
        file_name = path.split(os.sep)[-1].rsplit(".", 1)[0]
        file_root = f"{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}"
        cv2.imwrite(f"{file_root}.jpg", new_image, [cv2.IMWRITE_JPEG_QUALITY, 85])
        print(f"Succeeded {index + 1}/{NUMBER_IMAGES} with {file_name}")
        annos_list = []
        for anno in new_annos:
            width = anno[3] - anno[1]
            height = anno[4] - anno[2]
            x_center = anno[1] + width / 2
            y_center = anno[2] + height / 2
            obj = f"{anno[0]} {x_center} {y_center} {width} {height}"
            annos_list.append(obj)
        with open(f"{file_root}.txt", "w") as outfile:
            outfile.write("\n".join(line for line in annos_list))
def get_dataset(label_dir: str, img_dir: str) -> tuple[list, list]:
    """Collect image paths and corner-format bounding boxes from YOLO label files."""
    img_paths = []
    labels = []
    for label_file in glob.glob(os.path.join(label_dir, "*.txt")):
        label_name = label_file.split(os.sep)[-1].rsplit(".", 1)[0]
        with open(label_file) as in_file:
            obj_lists = in_file.readlines()
        img_path = os.path.join(img_dir, f"{label_name}.jpg")

        boxes = []
        for obj_list in obj_lists:
            obj = obj_list.rstrip("\n").split(" ")
            xmin = float(obj[1]) - float(obj[3]) / 2
            ymin = float(obj[2]) - float(obj[4]) / 2
            xmax = float(obj[1]) + float(obj[3]) / 2
            ymax = float(obj[2]) + float(obj[4]) / 2

            boxes.append([int(obj[0]), xmin, ymin, xmax, ymax])
        if not boxes:
            continue
        img_paths.append(img_path)
        labels.append(boxes)
    return img_paths, labels
def update_image_and_anno(
    all_img_list: list,
    all_annos: list,
    idxs: list[int],
    output_size: tuple[int, int],
    scale_range: tuple[float, float],
    filter_scale: float = 0.0,
) -> tuple[list, list, str]:
    """Combine 4 images into one mosaic image and rescale their annotations."""
    output_img = np.zeros([output_size[0], output_size[1], 3], dtype=np.uint8)
    scale_x = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    scale_y = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
    divid_point_x = int(scale_x * output_size[1])
    divid_point_y = int(scale_y * output_size[0])

    new_anno = []
    path_list = []
    for i, index in enumerate(idxs):
        path = all_img_list[index]
        path_list.append(path)
        img_annos = all_annos[index]
        img = cv2.imread(path)
        if i == 0:  # top-left
            img = cv2.resize(img, (divid_point_x, divid_point_y))
            output_img[:divid_point_y, :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = bbox[2] * scale_y
                xmax = bbox[3] * scale_x
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 1:  # top-right
            img = cv2.resize(img, (output_size[1] - divid_point_x, divid_point_y))
            output_img[:divid_point_y, divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = bbox[2] * scale_y
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = bbox[4] * scale_y
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        elif i == 2:  # bottom-left
            img = cv2.resize(img, (divid_point_x, output_size[0] - divid_point_y))
            output_img[divid_point_y : output_size[0], :divid_point_x, :] = img
            for bbox in img_annos:
                xmin = bbox[1] * scale_x
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = bbox[3] * scale_x
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])
        else:  # bottom-right
            img = cv2.resize(
                img, (output_size[1] - divid_point_x, output_size[0] - divid_point_y)
            )
            output_img[divid_point_y : output_size[0], divid_point_x : output_size[1], :] = img
            for bbox in img_annos:
                xmin = scale_x + bbox[1] * (1 - scale_x)
                ymin = scale_y + bbox[2] * (1 - scale_y)
                xmax = scale_x + bbox[3] * (1 - scale_x)
                ymax = scale_y + bbox[4] * (1 - scale_y)
                new_anno.append([bbox[0], xmin, ymin, xmax, ymax])

    # Remove bounding box small than scale of filter
    if filter_scale > 0:
        new_anno = [
            anno
            for anno in new_anno
            if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
        ]

    return output_img, new_anno, path_list[0]
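

# Illustrative sketch (not part of the original script): the YOLO label round trip
# used above — get_dataset() converts (x_center, y_center, w, h) to corners, and
# main() converts corners back. The values are made up (chosen to be exact floats).
def _demo_yolo_roundtrip() -> None:
    x_center, y_center, w, h = 0.5, 0.5, 0.25, 0.5
    xmin, ymin = x_center - w / 2, y_center - h / 2
    xmax, ymax = x_center + w / 2, y_center + h / 2
    assert (xmax - xmin, ymax - ymin) == (w, h)
    assert ((xmin + xmax) / 2, (ymin + ymax) / 2) == (x_center, y_center)
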
def random_chars(number_char: int) -> str:
    """Generate a random string of lowercase letters and digits."""
    assert number_char > 1, "The number of characters should be greater than 1"
    letter_code = ascii_lowercase + digits
    return "".join(random.choice(letter_code) for _ in range(number_char))


if __name__ == "__main__":
    main()
    print("DONE ✅")
| 317 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])

    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a preverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here

    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])

    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
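

# Illustrative sketch (not part of the test file): what "disjunctive" means here —
# generation must eventually produce ONE of the candidate token sequences. After
# the shared prefix [1, 2], both branches are still alive, and either completes it.
# Token ids below are arbitrary; the function is defined for demonstration only.
def _demo_disjunctive_branches() -> None:
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
    dc.update(1)
    dc.update(2)
    stepped, completed, reset = dc.update(4)  # update(3) would also complete it
    assert completed
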
| 13 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mvp_fast"] = ["MvpTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mvp"] = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 280 | 0 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv("TEST_SAGEMAKER" , "False" ) ) is not True , reason="Skipping test because should only be run when releasing minor transformers version" , )
@pytest.mark.usefixtures("sm_env" )
@parameterized_class(
[
{
"framework": "pytorch",
"script": "run_glue.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_50, "eval_accuracy": 0.6, "eval_loss": 0.9},
},
{
"framework": "tensorflow",
"script": "run_tf.py",
"model_name_or_path": "distilbert-base-cased",
"instance_type": "ml.g4dn.xlarge",
"results": {"train_runtime": 6_00, "eval_accuracy": 0.3, "eval_loss": 0.9},
},
] )
class SingleNodeTest(unittest.TestCase):
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")

    def create_estimator(self, instance_count=1):
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-single",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={**self.env.hyperparameters, "model_name_or_path": self.model_name_or_path},
            metric_definitions=self.env.metric_definitions,
            py_version="py36",
        )

    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")

    def test_glue(self):
        # create estimator
        estimator = self.create_estimator()

        # run training
        estimator.fit()

        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()

        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )

        # assert kpis
        assert train_runtime <= self.results["train_runtime"]
        assert all(t >= self.results["eval_accuracy"] for t in eval_accuracy)
        assert all(t <= self.results["eval_loss"] for t in eval_loss)

        # dump tests result into json file to share in PR
        with open(f"{estimator.latest_training_job.name}.json", "w") as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 366 |
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(f"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = "sshleifer/student_marian_en_ro_6_1"
MBART_TINY = "sshleifer/tiny-mbart"
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seq2seq_quick(
        self,
        distributed=False,
        extra_args_str=None,
        predict_with_generate=True,
        do_train=True,
        do_eval=True,
        do_predict=True,
    ):
        output_dir = self.run_trainer(
            eval_steps=1,
            max_len=12,
            model_name=MBART_TINY,
            num_train_epochs=1,
            distributed=distributed,
            extra_args_str=extra_args_str,
            predict_with_generate=predict_with_generate,
            do_train=do_train,
            do_eval=do_eval,
            do_predict=do_predict,
        )
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history

        if not do_eval:
            return

        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]

        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats

            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["eval_bleu"], float)
            assert not math.isnan(float(last_step_stats["eval_loss"])), "eval_loss must not be `nan`"
    @require_torch_non_multi_gpu
    def test_run_seq2seq_no_dist(self):
        self.run_seq2seq_quick()

    # verify that the trainer can handle non-distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_dp(self):
        self.run_seq2seq_quick(distributed=False)

    # verify that the trainer can handle distributed with n_gpu > 1
    @require_torch_multi_gpu
    def test_run_seq2seq_ddp(self):
        self.run_seq2seq_quick(distributed=True)

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(distributed=True, extra_args_str="--sharded_ddp simple --fp16")

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2", predict_with_generate=False
        )

    @unittest.skip("Requires an update of the env running those tests")
    @require_torch_multi_gpu
    @require_fairscale
    def test_run_seq2seq_fully_sharded_ddp_fp16(self):
        self.run_seq2seq_quick(
            distributed=True, extra_args_str="--sharded_ddp zero_dp_2 --fp16", predict_with_generate=False
        )

    @require_apex
    @require_torch_gpu
    def test_run_seq2seq_apex(self):
        # XXX: apex breaks the trainer if it's run twice e.g. run_seq2seq.main() from the same
        # program and it breaks other tests that run from the same pytest worker, therefore until this is
        # sorted out it must be run only in an external program, that is distributed=True in this
        # test and only under one or more gpus - if we want cpu will need to make a special test
        #
        # specifically to the problem traced it to self.optimizer.step() - if it's run 2nd time via
        # 2nd main() call it botches the future eval.
        #
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
        # test 2nd time - was getting eval_loss': nan'
        # to reproduce the problem set distributed=False
        self.run_seq2seq_quick(distributed=True, extra_args_str="--fp16 --fp16_backend=apex")
    @parameterized.expand(["base", "low", "high", "mixed"])
    @require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id):
        # as each sub-test is slow-ish split into multiple sub-tests to avoid CI timeout
        experiments = {
            # test with the default log_level - should be info and thus log info once
            "base": {"extra_args_str": "", "n_matches": 1},
            # test with low log_level and log_level_replica - should be noisy on all processes
            # now the info string should appear twice on 2 processes
            "low": {"extra_args_str": "--log_level debug --log_level_replica debug", "n_matches": 2},
            # test with high log_level and low log_level_replica
            # now the info string should appear once only on the replica
            "high": {"extra_args_str": "--log_level error --log_level_replica debug", "n_matches": 1},
            # test with high log_level and log_level_replica - should be quiet on all processes
            "mixed": {"extra_args_str": "--log_level error --log_level_replica error", "n_matches": 0},
        }

        data = experiments[experiment_id]
        kwargs = {"distributed": True, "predict_with_generate": False, "do_eval": False, "do_predict": False}
        log_info_string = "Running training"
        with CaptureStderr() as cl:
            self.run_seq2seq_quick(**kwargs, extra_args_str=data["extra_args_str"])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["n_matches"])
    @slow
    def test_run_seq2seq(self):
        output_dir = self.run_trainer(
            eval_steps=2,
            max_len=128,
            model_name=MARIAN_MODEL,
            learning_rate=3e-4,
            num_train_epochs=10,
            distributed=False,
        )

        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, "trainer_state.json")).log_history
        eval_metrics = [log for log in logs if "eval_loss" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]

        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["eval_bleu"], float)

        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
    @slow
    @require_bitsandbytes
    def test_run_seq2seq_bnb(self):
        from transformers.training_args import OptimizerNames

        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = "--skip_memory_metrics 0"

            output_dir = self.run_trainer(
                max_len=128,
                model_name=MARIAN_MODEL,
                learning_rate=3e-4,
                num_train_epochs=1,
                optim=optim,
                distributed=True,
                extra_args_str=extra_args,
                do_eval=False,
                do_predict=False,
                n_gpus_to_use=1,
            )

            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, "trainer_state.json")).log_history
            gpu_peak_mem_mb = int(logs[0]["train_mem_gpu_peaked_delta"] / 2**20)
            gpu_alloc_mem_mb = int(logs[0]["train_mem_gpu_alloc_delta"] / 2**20)

            loss = logs[0]["train_loss"]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)

        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb

        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb

        # sshleifer/student_marian_en_ro_6_1 has 54M parameters, 29M of which is `nn.Embedding` which
        # doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
        # in 2 bytes and the diff in optim memory usage is derived as so:
        #
        # - normal 25*8=~200MB (8 bytes per param)
        # - bnb    25*2= ~50MB (2 bytes per param)
        #
        # Thus we should expect ~150MB total memory saved.
        #
        # Peak memory should be the same - the total should be different by about that same margin
        #
        # After leaving a small margin to accommodate for differences between gpus let's check
        # that we have at least 120MB in savings
        expected_savings = 120

        # uncomment the following if this test starts failing - requires py38 for a new print feature
        # gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
        # print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
        # print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
        # print(f"{gpu_alloc_mem_diff=}MB")
        # print(f"{gpu_peak_mem_diff=}MB")
        # print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
        # print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")

        self.assertGreater(
            gpu_alloc_mem_diff,
            expected_savings,
            "should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"
            f" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB",
        )

        self.assertGreater(
            gpu_total_mem_diff,
            expected_savings,
            "should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"
            f" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"
            f" gpu_total_mem_bnb={gpu_total_mem_bnb}MB",
        )

        self.assertEqual(
            loss_orig, loss_bnb, f"loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}"
        )
    def run_trainer(
        self,
        max_len: int,
        model_name: str,
        num_train_epochs: int,
        learning_rate: float = 3e-3,
        optim: str = "adafactor",
        distributed: bool = False,
        extra_args_str: str = None,
        eval_steps: int = 0,
        predict_with_generate: bool = True,
        do_train: bool = True,
        do_eval: bool = True,
        do_predict: bool = True,
        n_gpus_to_use: int = None,
    ):
        data_dir = self.test_file_dir / "../fixtures/tests_samples/wmt_en_ro"
        output_dir = self.get_auto_remove_tmp_dir()
        args_train = f"""
            --model_name_or_path {model_name}
            --train_file {data_dir}/train.json
            --validation_file {data_dir}/val.json
            --test_file {data_dir}/test.json
            --output_dir {output_dir}
            --overwrite_output_dir
            --max_train_samples 8
            --max_source_length {max_len}
            --max_target_length {max_len}
            --do_train
            --num_train_epochs {str(num_train_epochs)}
            --per_device_train_batch_size 4
            --learning_rate {learning_rate}
            --warmup_steps 8
            --logging_steps 0
            --logging_strategy no
            --save_steps {str(eval_steps)}
            --group_by_length
            --label_smoothing_factor 0.1
            --target_lang ro_RO
            --source_lang en_XX
        """.split()

        args_eval = f"""
            --do_eval
            --per_device_eval_batch_size 4
            --max_eval_samples 8
            --val_max_target_length {max_len}
            --evaluation_strategy steps
            --eval_steps {str(eval_steps)}
        """.split()

        args_predict = """
            --do_predict
        """.split()

        args = []
        if do_train:
            args += args_train

        if do_eval:
            args += args_eval

        if do_predict:
            args += args_predict

        if predict_with_generate:
            args += "--predict_with_generate".split()

        if do_train:
            if optim == "adafactor":
                args += "--adafactor".split()
            else:
                args += f"--optim {optim}".split()

        if extra_args_str is not None:
            args += extra_args_str.split()

        if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
                -m torch.distributed.run
                --nproc_per_node={n_gpus_to_use}
                --master_port={master_port}
                {self.examples_dir_str}/pytorch/translation/run_translation.py
            """.split()
            cmd = [sys.executable] + distributed_args + args
            # keep for quick debug
            # print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
        else:
            testargs = ["run_translation.py"] + args
            with patch.object(sys, "argv", testargs):
                main()

        return output_dir
| 344 | 0 |
from collections import defaultdict
from math import gcd
def solution(limit: int = 1500000) -> int:
    """
    Count the perimeters p <= limit that can form exactly one integer-sided
    right triangle, generating primitive triples with Euclid's formula.
    """
    frequencies: defaultdict = defaultdict(int)
    euclid_m = 2
    while 2 * euclid_m * (euclid_m + 1) <= limit:
        for euclid_n in range((euclid_m % 2) + 1, euclid_m, 2):
            if gcd(euclid_m, euclid_n) > 1:
                continue
            primitive_perimeter = 2 * euclid_m * (euclid_m + euclid_n)
            for perimeter in range(primitive_perimeter, limit + 1, primitive_perimeter):
                frequencies[perimeter] += 1
        euclid_m += 1
    return sum(1 for frequency in frequencies.values() if frequency == 1)
if __name__ == "__main__":
print(F"{solution() = }")
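

# Illustrative sketch (not part of the original solution): Euclid's formula with
# m > n > 0 gives the triple (a, b, c) = (m^2 - n^2, 2mn, m^2 + n^2); its perimeter
# simplifies to 2m(m + n), which is exactly what the loops above step by.
# m=2, n=1 yields (3, 4, 5) with perimeter 12. Defined for demonstration only.
def _demo_euclid_triple(m: int = 2, n: int = 1) -> None:
    a, b, c = m * m - n * n, 2 * m * n, m * m + n * n
    assert a * a + b * b == c * c
    assert a + b + c == 2 * m * (m + n)
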
| 345 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def remove_ignore_keys_(state_dict):
    ignore_keys = [
        "decoder.version",
        "decoder.output_projection.weight",
        "_float_tensor",
        "decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)


def make_linear_from_emb(emb):
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer


def convert_fairseq_xglm_checkpoint_from_disk(checkpoint_path):
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    args = Namespace(**checkpoint["cfg"]["model"])
    state_dict = checkpoint["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["decoder.embed_tokens.weight"].shape[0]

    state_dict = {key.replace("decoder", "model"): val for key, val in state_dict.items()}

    config = XGLMConfig(
        vocab_size=vocab_size,
        max_position_embeddings=args.max_target_positions,
        num_layers=args.decoder_layers,
        attention_heads=args.decoder_attention_heads,
        ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.decoder_embed_dim,
        layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function="gelu",
        scale_embedding=not args.no_scale_embedding,
        tie_word_embeddings=args.share_decoder_input_output_embed,
    )

    model = XGLMForCausalLM(config)
    missing = model.load_state_dict(state_dict, strict=False)
    print(missing)
    model.lm_head = make_linear_from_emb(model.model.embed_tokens)

    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    model = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
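

# Illustrative sketch (not part of the converter): what make_linear_from_emb()
# above produces — an output projection whose weight shares storage with the
# embedding matrix and that has no bias. Sizes are made up; defined only as a demo.
def _demo_tied_head() -> None:
    emb = nn.Embedding(10, 4)
    head = make_linear_from_emb(emb)
    assert head.weight.data_ptr() == emb.weight.data_ptr()  # shares storage
    assert head.bias is None
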
| 345 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_squeezebert': [
'SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SqueezeBertConfig',
'SqueezeBertOnnxConfig',
],
'tokenization_squeezebert': ['SqueezeBertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
'SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'SqueezeBertForMaskedLM',
'SqueezeBertForMultipleChoice',
'SqueezeBertForQuestionAnswering',
'SqueezeBertForSequenceClassification',
'SqueezeBertForTokenClassification',
'SqueezeBertModel',
'SqueezeBertModule',
'SqueezeBertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 363 |
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)

    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key) -> None:
        """encrypt_key is an NxN numpy array"""
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)

        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]

        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)

        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted_batch = "".join(self.replace_digits(num) for num in batch_encrypted)
            encrypted += encrypted_batch

        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))

        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break

        inv_key = (
            det_inv
            * numpy.linalg.det(self.encrypt_key)
            * numpy.linalg.inv(self.encrypt_key)
        )

        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""

        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted_batch = "".join(self.replace_digits(num) for num in batch_decrypted)
            decrypted += decrypted_batch

        return decrypted
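

# Illustrative sketch (not part of the original script): a non-interactive round
# trip with a 2x2 key. det([[2, 5], [1, 6]]) = 7, which is coprime with 36, so the
# key is valid. The exact ciphertext is deliberately not asserted here.
def _demo_round_trip() -> None:
    hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
    cipher = hc.encrypt("HELP")
    assert hc.decrypt(cipher) == "HELP"
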
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 94 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_funnel': ['FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FunnelConfig'],
'convert_funnel_original_tf_checkpoint_to_pytorch': [],
'tokenization_funnel': ['FunnelTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_funnel_fast"] = ["FunnelTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_funnel"] = [
'FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'FunnelBaseModel',
'FunnelForMaskedLM',
'FunnelForMultipleChoice',
'FunnelForPreTraining',
'FunnelForQuestionAnswering',
'FunnelForSequenceClassification',
'FunnelForTokenClassification',
'FunnelModel',
'FunnelPreTrainedModel',
'load_tf_weights_in_funnel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_funnel"] = [
'TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFFunnelBaseModel',
'TFFunnelForMaskedLM',
'TFFunnelForMultipleChoice',
'TFFunnelForPreTraining',
'TFFunnelForQuestionAnswering',
'TFFunnelForSequenceClassification',
'TFFunnelForTokenClassification',
'TFFunnelModel',
'TFFunnelPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
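# What the lazy pattern buys at runtime (illustrative):
#   import transformers.models.funnel          # cheap: no torch/TF import yet
#   transformers.models.funnel.FunnelModel     # first attribute access triggers
#                                              # the real import of .modeling_funnel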
| 216 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_roformer': ['ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'RoFormerConfig', 'RoFormerOnnxConfig'],
'tokenization_roformer': ['RoFormerTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_roformer_fast"] = ["RoFormerTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roformer"] = [
'ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'RoFormerForCausalLM',
'RoFormerForMaskedLM',
'RoFormerForMultipleChoice',
'RoFormerForQuestionAnswering',
'RoFormerForSequenceClassification',
'RoFormerForTokenClassification',
'RoFormerLayer',
'RoFormerModel',
'RoFormerPreTrainedModel',
'load_tf_weights_in_roformer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roformer"] = [
'TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFRoFormerForCausalLM',
'TFRoFormerForMaskedLM',
'TFRoFormerForMultipleChoice',
'TFRoFormerForQuestionAnswering',
'TFRoFormerForSequenceClassification',
'TFRoFormerForTokenClassification',
'TFRoFormerLayer',
'TFRoFormerModel',
'TFRoFormerPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roformer"] = [
'FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'FlaxRoFormerForMaskedLM',
'FlaxRoFormerForMultipleChoice',
'FlaxRoFormerForQuestionAnswering',
'FlaxRoFormerForSequenceClassification',
'FlaxRoFormerForTokenClassification',
'FlaxRoFormerModel',
'FlaxRoFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 216 | 1 |
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesised infix expression of single-digit operands."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: operands go straight onto the operand stack
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: operators go onto the operator stack
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: a closing parenthesis applies the top operator to the
            # top two operands and pushes the result back
            opr = operator_stack.peek()
            operator_stack.pop()
            num_two = operand_stack.peek()
            operand_stack.pop()
            num_one = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_one, num_two)
            operand_stack.push(total)
    # RULE 5: the final value on the operand stack is the result
    return operand_stack.peek()
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
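# Hand trace (illustrative): "(4 * 2)" -> 8, "(2 + 3)" -> 5, "(8 * 5)" -> 40,
# "(5 + 40)" -> 45, matching the expected answer above. The per-character scan
# means only single-digit operands are supported.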
| 354 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))
def similarity_search(dataset: np.ndarray, value_array: np.ndarray) -> list:
    """For every query vector, return its nearest dataset vector and the distance."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            # keep the smallest distance seen so far
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])
    return answer
def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))
if __name__ == "__main__":
import doctest
doctest.testmod()
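# Illustrative call (assumes 2-D float arrays of matching width):
#   dataset = np.array([[0.0, 0.0], [1.0, 1.0]])
#   similarity_search(dataset, np.array([[0.9, 1.0]]))
#   -> [[[1.0, 1.0], 0.1]]   # nearest vector and its distance, up to rounding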
| 138 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
logger = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 13 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the maximum product a*b*c over Pythagorean triples with a+b+c == n."""
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f'{solution() = }')
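# Sanity check (illustrative): for n = 12 the only right triangle with integer
# sides and perimeter 12 is (3, 4, 5), so solution(12) == 3 * 4 * 5 == 60.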
| 321 | 0 |
import base64


def base64_encode(string: str) -> bytes:
    """Encode a UTF-8 string to Base64 bytes."""
    return base64.b64encode(string.encode("utf-8"))


def base64_decode(encoded_data: bytes) -> str:
    """Decode Base64 bytes back to a UTF-8 string."""
    return base64.b64decode(encoded_data).decode("utf-8")
if __name__ == "__main__":
    test = "Hello World!"
    encoded = base64_encode(test)
    print(encoded)
    decoded = base64_decode(encoded)
print(decoded)
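# Round-trip property (illustrative): base64_decode(base64_encode(s)) == s for
# any str s, since b64decode is the exact inverse of b64encode.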
| 356 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
@require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
@require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]

        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
@require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            'You cannot infer task automatically within `pipeline` when using offline mode',
            result.stderr.decode().replace('\n', ''),
        )
@require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]

        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
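# Pattern summary (illustrative): each test assembles a tiny program from the
# `load`/`run`/`mock` strings, where `mock` monkey-patches socket.socket so any
# network access raises, then runs it in a subprocess. A subprocess is needed
# because TRANSFORMERS_OFFLINE is read once at import time and cannot be
# flipped inside the already-running pytest process.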
| 120 | 0 |
'''simple docstring'''
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
    FP8RecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
    is_4bit_bnb_available,
    is_8bit_bnb_available,
is_aim_available,
    is_bf16_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
    is_fp8_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
    convert_outputs_to_fp32,
    convert_to_fp32,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_4bit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
    prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
    T5TrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 83 |
"""simple docstring"""
from __future__ import annotations
from functools import lru_cache
from math import ceil
NUM_PRIMES = 100

primes = set(range(3, NUM_PRIMES, 2))
primes.add(2)
prime: int
for prime in range(3, ceil(NUM_PRIMES**0.5), 2):
if prime not in primes:
continue
primes.difference_update(set(range(prime * prime, NUM_PRIMES, prime)))
@lru_cache(maxsize=100 )
def partition(number_to_partition: int) -> set[int]:
if number_to_partition < 0:
return set()
elif number_to_partition == 0:
return {1}
    ret: set[int] = set()
    prime: int
    sub: int
for prime in primes:
if prime > number_to_partition:
continue
for sub in partition(number_to_partition - prime ):
ret.add(sub * prime )
return ret
def solution(number_unique_partitions: int = 5000) -> int | None:
    for number_to_partition in range(1, NUM_PRIMES):
        if len(partition(number_to_partition)) > number_unique_partitions:
            return number_to_partition
return None
if __name__ == "__main__":
print(F"{solution() = }")
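# Worked example (illustrative): 10 = 7+3 = 5+5 = 5+3+2 = 3+3+2+2 = 2+2+2+2+2,
# so partition(10) has five elements (the five distinct prime products) -- the
# "five different ways" quoted in Project Euler problem 77.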
| 202 | 0 |
import logging
import torch
from accelerate import Accelerator
from arguments import EvaluationArguments
from datasets import load_dataset
from torch.utils.data import IterableDataset
from torch.utils.data.dataloader import DataLoader
from transformers import AutoModelForCausalLM, AutoTokenizer, HfArgumentParser, set_seed
class ConstantLengthDataset(IterableDataset):
    def __init__(self, tokenizer, dataset, seq_length=1024, num_of_sequences=1024, chars_per_token=3.6):
        self.tokenizer = tokenizer
        self.concat_token_id = tokenizer.bos_token_id
        self.dataset = dataset
        self.seq_length = seq_length
        self.input_characters = seq_length * chars_per_token * num_of_sequences

    def __iter__(self):
        iterator = iter(self.dataset)
        more_examples = True
        while more_examples:
            buffer, buffer_len = [], 0
            while True:
                if buffer_len >= self.input_characters:
                    break
                try:
                    buffer.append(next(iterator)['content'])
                    buffer_len += len(buffer[-1])
                except StopIteration:
                    more_examples = False
                    break
            tokenized_inputs = self.tokenizer(buffer, truncation=False)['input_ids']
            all_token_ids = []
            for tokenized_input in tokenized_inputs:
                all_token_ids.extend(tokenized_input + [self.concat_token_id])
            for i in range(0, len(all_token_ids), self.seq_length):
                input_ids = all_token_ids[i : i + self.seq_length]
                if len(input_ids) == self.seq_length:
                    yield torch.tensor(input_ids)
def create_dataloader(args):
    ds_kwargs = {'streaming': True}
    valid_data = load_dataset(args.dataset_name, split='train', **ds_kwargs)
    valid_dataset = ConstantLengthDataset(tokenizer, valid_data, seq_length=args.seq_length)
    eval_dataloader = DataLoader(valid_dataset, batch_size=args.batch_size)
    return eval_dataloader
def evaluate(args):
    model.eval()
    losses = []
    for step, batch in enumerate(eval_dataloader):
        with torch.no_grad():
            outputs = model(batch, labels=batch)
        loss = outputs.loss.repeat(args.batch_size)
        losses.append(accelerator.gather(loss))
        if args.max_eval_steps > 0 and step >= args.max_eval_steps:
            break
    loss = torch.mean(torch.cat(losses))
    try:
        perplexity = torch.exp(loss)
    except OverflowError:
        perplexity = float('inf')
    return loss.item(), perplexity.item()
# Setup Accelerator
accelerator = Accelerator()
# Parse configuration
parser = HfArgumentParser(EvaluationArguments)
args = parser.parse_args()
set_seed(args.seed)
# Logging
logger = logging.getLogger(__name__)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
# Load model and tokenizer
model = AutoModelForCausalLM.from_pretrained(args.model_ckpt)
tokenizer = AutoTokenizer.from_pretrained(args.model_ckpt)
# Load dataset and dataloader
eval_dataloader = create_dataloader(args)
# Prepare everything with our `accelerator`.
model, eval_dataloader = accelerator.prepare(model, eval_dataloader)
# Evaluate and save the last checkpoint
logger.info('''Evaluating and saving model after training''')
eval_loss, perplexity = evaluate(args)
logger.info(F'loss/eval: {eval_loss}, perplexity: {perplexity}')
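# Metric note (illustrative): perplexity is exp(mean token loss); gathering the
# per-batch losses across processes before averaging keeps the value correct
# when Accelerate runs this evaluation on multiple GPUs.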
| 361 |
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt='first prompt', image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
            pipe.to(torch_device)
            pipe.set_progress_bar_config(disable=None)

            generator = generator.manual_seed(0)
            new_image = pipe.dual_guided(
                prompt='first prompt', image=prompt_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type='numpy').images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained('shi-labs/versatile-diffusion', torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = 'cyberpunk 2077'
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg')
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images

        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = 'A painting of a squirrel eating a burger '
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type='numpy').images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
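# Test-pattern note (illustrative): the save/load round-trip test reuses the
# same seed and asserts near-identical outputs, while the slice tests compare a
# fixed 3x3 image corner against recorded reference values with a loose 1e-1
# tolerance.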
| 304 | 0 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    """Fetch the JSON record for an Open Library ID (olid)."""
    new_olid = olid.strip().strip('/')  # Remove leading/trailing whitespace & slashes
    if new_olid.count('/') != 1:
        msg = f'{olid} is not a valid Open Library olid'
        raise ValueError(msg)
    return requests.get(f'https://openlibrary.org/{new_olid}.json').json()
def summarize_book(ol_book_data: dict) -> dict:
    """Given Open Library book data, return a human-readable summary dict."""
    desired_keys = {
        'title': 'Title',
        'publish_date': 'Publish date',
        'authors': 'Authors',
        'number_of_pages': 'Number of pages:',
        'first_sentence': 'First sentence',
        'isbn_10': 'ISBN (10)',
        'isbn_13': 'ISBN (13)',
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data['Authors'] = [
        get_openlibrary_data(author['key'])['name'] for author in data['Authors']
    ]
    data['First sentence'] = data['First sentence']['value']
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ', '.join(value)
    return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F'''Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.''')
continue
print(F'''\nSearching Open Library for ISBN: {isbn}...\n''')
try:
            book_summary = summarize_book(get_openlibrary_data(f'isbn/{isbn}'))
print("\n".join(F'''{key}: {value}''' for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F'''Sorry, there are no results for ISBN: {isbn}.''')
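# Example session (illustrative): entering 0140328726 looks up that ISBN and
# prints Title, Publish date, Authors, page count and both ISBN forms; a JSON
# decoding failure falls through to the "no results" branch above.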
| 221 | """simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers import (
TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST,
BertConfig,
DPRConfig,
TFDPRContextEncoder,
TFDPRQuestionEncoder,
TFDPRReader,
)
class TFDPRModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, projection_dim=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.projection_dim = projection_dim
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            # follow test_modeling_tf_ctrl.py
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range)
        config = DPRConfig(projection_dim=self.projection_dim, **config.to_dict())

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_dpr_context_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRContextEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_question_encoder(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRQuestionEncoder(config=config)
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        result = model(input_ids, token_type_ids=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.projection_dim or self.hidden_size))
    def create_and_check_dpr_reader(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFDPRReader(config=config)
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.relevance_logits.shape, (self.batch_size,))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids}
        return config, inputs_dict
@require_tf
class TFDPRModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDPRContextEncoder,
            TFDPRQuestionEncoder,
            TFDPRReader,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = {'feature-extraction': TFDPRQuestionEncoder} if is_tf_available() else {}

    test_resize_embeddings = False
    test_missing_keys = False
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFDPRModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPRConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_dpr_context_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_context_encoder(*config_and_inputs)
    def test_dpr_question_encoder_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_question_encoder(*config_and_inputs)
    def test_dpr_reader_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_dpr_reader(*config_and_inputs)
@slow
    def test_model_from_pretrained(self):
        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRContextEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRQuestionEncoder.from_pretrained(model_name)
            self.assertIsNotNone(model)

        for model_name in TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDPRReader.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
class TFDPRModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFDPRQuestionEncoder.from_pretrained('facebook/dpr-question_encoder-single-nq-base')

        input_ids = tf.constant(
            [[101, 7592, 1010, 2003, 2026, 3899, 10140, 1029, 102]])  # [CLS] hello, is my dog cute? [SEP]
        output = model(input_ids)[0]  # embedding shape = (1, 768)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    0.03236253,
                    0.12753335,
                    0.16818509,
                    0.00279786,
                    0.3896933,
                    0.24264945,
                    0.2178971,
                    -0.02335227,
                    -0.08481959,
                    -0.14324117,
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :10].numpy(), expected_slice.numpy(), atol=1e-4))
| 221 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {"configuration_mra": ["MRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MraConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mra"] = [
"MRA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MraForMaskedLM",
"MraForMultipleChoice",
"MraForQuestionAnswering",
"MraForSequenceClassification",
"MraForTokenClassification",
"MraLayer",
"MraModel",
"MraPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mra import MRA_PRETRAINED_CONFIG_ARCHIVE_MAP, MraConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mra import (
MRA_PRETRAINED_MODEL_ARCHIVE_LIST,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraLayer,
MraModel,
MraPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 180 |
from manim import *
class Stage5(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)
        meta_mem = Rectangle(height=0.25, width=0.25)
a = [mem.copy() for i in range(6 )]
a = [mem.copy() for i in range(6 )]
a = VGroup(*A ).arrange(A , buff=0 )
a = VGroup(*A ).arrange(A , buff=0 )
a = VGroup(A , A ).arrange(A , buff=0 )
a = Text("CPU" , font_size=24 )
a = Group(A , A ).arrange(A , buff=0.5 , aligned_edge=A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(A )
a = [mem.copy() for i in range(4 )]
a = VGroup(*A ).arrange(A , buff=0 )
a = Text("GPU" , font_size=24 )
a = Group(A , A ).arrange(A , buff=0.5 , aligned_edge=A )
gpu.move_to([-1, -1, 0] )
self.add(A )
a = [mem.copy() for i in range(6 )]
a = VGroup(*A ).arrange(A , buff=0 )
a = Text("Model" , font_size=24 )
a = Group(A , A ).arrange(A , buff=0.5 , aligned_edge=A )
model.move_to([3, -1.0, 0] )
self.add(A )
a = []
a = []
for i, rect in enumerate(A ):
a = fill.copy().set_fill(A , opacity=0.8 )
target.move_to(A )
model_arr.append(A )
a = Rectangle(height=0.4_6 , width=0.4_6 ).set_stroke(width=0.0 ).set_fill(A , opacity=0.8 )
cpu_target.move_to(cpu_left_col_base[i] )
model_cpu_arr.append(A )
self.add(*A , *A )
a = [meta_mem.copy() for i in range(6 )]
a = [meta_mem.copy() for i in range(6 )]
a = VGroup(*A ).arrange(A , buff=0 )
a = VGroup(*A ).arrange(A , buff=0 )
a = VGroup(A , A ).arrange(A , buff=0 )
a = Text("Disk" , font_size=24 )
a = Group(A , A ).arrange(A , buff=0.5 , aligned_edge=A )
disk.move_to([-4, -1.2_5, 0] )
self.add(A , A )
a = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
a = MarkupText(
F'''<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model''' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(A , A )
a = MarkupText(
F'''<span fgcolor=\'{BLUE}\'>●</span> Checkpoint''' , font_size=18 , )
blue_text.next_to(A , DOWN * 2.4 , aligned_edge=key_text.get_left() )
self.add(A )
a = MarkupText(
F'''Now watch as an input is passed through the model\nand how the memory is utilized and handled.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A ) )
a = Square(0.3 )
input.set_fill(A , opacity=1.0 )
input.set_stroke(width=0.0 )
input.next_to(model_base[0] , A , buff=0.5 )
self.play(Write(A ) )
input.generate_target()
input.target.next_to(model_arr[0] , direction=A , buff=0.0_2 )
self.play(MoveToTarget(A ) )
self.play(FadeOut(A ) )
a = Arrow(start=A , end=A , color=A , buff=0.5 )
a.next_to(model_arr[0].get_left() , A , buff=0.2 )
model_cpu_arr[0].generate_target()
model_cpu_arr[0].target.move_to(gpu_rect[0] )
a = MarkupText(
F'''As the input reaches a layer, the hook triggers\nand weights are moved from the CPU\nto the GPU and back.''' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(A , run_time=3 ) )
a = {"run_time": 1, "fade_in": True, "fade_out": True, "buff": 0.0_2}
self.play(
Write(A ) , Circumscribe(model_arr[0] , color=A , **A ) , Circumscribe(model_cpu_arr[0] , color=A , **A ) , Circumscribe(gpu_rect[0] , color=A , **A ) , )
self.play(MoveToTarget(model_cpu_arr[0] ) )
a = a.copy()
for i in range(6 ):
a_c.next_to(model_arr[i].get_right() + 0.0_2 , A , buff=0.2 )
input.generate_target()
input.target.move_to(model_arr[i].get_right() + 0.0_2 )
a = AnimationGroup(
FadeOut(A , run_time=0.5 ) , MoveToTarget(A , run_time=0.5 ) , FadeIn(A , run_time=0.5 ) , lag_ratio=0.2 )
self.play(A )
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[i] )
if i < 5:
model_cpu_arr[i + 1].generate_target()
model_cpu_arr[i + 1].target.move_to(gpu_rect[0] )
if i >= 1:
a = 0.7
self.play(
Circumscribe(model_arr[i] , **A ) , Circumscribe(cpu_left_col_base[i] , **A ) , Circumscribe(cpu_left_col_base[i + 1] , color=A , **A ) , Circumscribe(gpu_rect[0] , color=A , **A ) , Circumscribe(model_arr[i + 1] , color=A , **A ) , )
if i < 1:
self.play(
MoveToTarget(model_cpu_arr[i] ) , MoveToTarget(model_cpu_arr[i + 1] ) , )
else:
self.play(
MoveToTarget(model_cpu_arr[i] , run_time=0.7 ) , MoveToTarget(model_cpu_arr[i + 1] , run_time=0.7 ) , )
else:
model_cpu_arr[i].generate_target()
model_cpu_arr[i].target.move_to(cpu_left_col_base[-1] )
input.generate_target()
input.target.next_to(model_arr[-1].get_right() , RIGHT + 0.0_2 , buff=0.2 )
self.play(
Circumscribe(model_arr[-1] , color=A , **A ) , Circumscribe(cpu_left_col_base[-1] , color=A , **A ) , Circumscribe(gpu_rect[0] , color=A , **A ) , )
self.play(MoveToTarget(model_cpu_arr[i] ) )
a = a_c
a = a_c.copy()
input.generate_target()
input.target.next_to(model_base[-1] , RIGHT + 0.0_2 , buff=0.5 )
self.play(
FadeOut(A ) , FadeOut(A , run_time=0.5 ) , )
a = MarkupText(F'''Inference on a model too large for GPU memory\nis successfully completed.''' , font_size=24 )
step_a.move_to([2, 2, 0] )
self.play(Write(A , run_time=3 ) , MoveToTarget(A ) )
self.wait()
| 180 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_plbart": ["PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "PLBartConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_plbart"] = ["PLBartTokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_plbart"] = [
'''PLBART_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PLBartForCausalLM''',
'''PLBartForConditionalGeneration''',
'''PLBartForSequenceClassification''',
'''PLBartModel''',
'''PLBartPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_plbart import PLBART_PRETRAINED_CONFIG_ARCHIVE_MAP, PLBartConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_plbart import PLBartTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_plbart import (
PLBART_PRETRAINED_MODEL_ARCHIVE_LIST,
PLBartForCausalLM,
PLBartForConditionalGeneration,
PLBartForSequenceClassification,
PLBartModel,
PLBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure) | 190 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class JsonDatasetReader(AbstractDatasetReader):
    def __init__(
        self,
        path_or_paths: NestedDataStructureLike[PathLike],
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        field: Optional[str] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            path_or_paths, split=split, features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs)
        self.field = field
        path_or_paths = path_or_paths if isinstance(path_or_paths, dict) else {self.split: path_or_paths}
        self.builder = Json(
            cache_dir=cache_dir, data_files=path_or_paths, features=features, field=field, **kwargs)
    def read(self):
        # Build iterable dataset
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split=self.split)
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc)
            dataset = self.builder.as_dataset(
                split=self.split, verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
class JsonDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        path_or_buf: Union[PathLike, BinaryIO],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_json_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f'num_proc {num_proc} must be an integer > 0.')

        self.dataset = dataset
        self.path_or_buf = path_or_buf
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.encoding = 'utf-8'
        self.to_json_kwargs = to_json_kwargs
    def write(self) -> int:
        _ = self.to_json_kwargs.pop('path_or_buf', None)
        orient = self.to_json_kwargs.pop('orient', 'records')
        lines = self.to_json_kwargs.pop('lines', True if orient == 'records' else False)
        index = self.to_json_kwargs.pop('index', False if orient in ['split', 'table'] else True)
        compression = self.to_json_kwargs.pop('compression', None)

        if compression not in [None, 'infer', 'gzip', 'bz2', 'xz']:
            raise NotImplementedError(f'`datasets` currently does not support {compression} compression')

        if isinstance(self.path_or_buf, (str, bytes, os.PathLike)):
            with fsspec.open(self.path_or_buf, 'wb', compression=compression) as buffer:
                written = self._write(file_obj=buffer, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        else:
            if compression:
                raise NotImplementedError(
                    f'The compression parameter is not supported when writing to a buffer, but compression={compression}'
                    ' was passed. Please provide a local path instead.')
            written = self._write(
                file_obj=self.path_or_buf, orient=orient, lines=lines, index=index, **self.to_json_kwargs)
        return written
    def _batch_json(self, args):
        offset, orient, lines, index, to_json_kwargs = args

        batch = query_table(
            table=self.dataset.data, key=slice(offset, offset + self.batch_size), indices=self.dataset._indices)
        json_str = batch.to_pandas().to_json(
            path_or_buf=None, orient=orient, lines=lines, index=index, **to_json_kwargs)
        if not json_str.endswith('\n'):
            json_str += '\n'
        return json_str.encode(self.encoding)
    def _write(self, file_obj: BinaryIO, orient, lines, index, **to_json_kwargs) -> int:
        """Writes the pyarrow table as JSON lines to a binary file handle."""
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size), unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format'):
                json_str = self._batch_json((offset, orient, lines, index, to_json_kwargs))
                written += file_obj.write(json_str)
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for json_str in logging.tqdm(
                    pool.imap(
                        self._batch_json, [(offset, orient, lines, index, to_json_kwargs) for offset in range(0, num_rows, batch_size)]), total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size, unit='ba', disable=not logging.is_progress_bar_enabled(), desc='Creating json from Arrow format'):
                    written += file_obj.write(json_str)
        return written
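# Illustrative usage (assumes a `datasets.Dataset` named `ds`; these classes
# are internal helpers, normally reached via Dataset.to_json):
#   JsonDatasetWriter(ds, "out.jsonl", num_proc=2).write()   # parallel shards
#   JsonDatasetReader("out.jsonl").read()                    # round-trip load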
| 317 | 0 |
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 2048
MAX_LENGTH = 4096
SEED = 42
PROCESS_TRAIN = os.environ.pop('PROCESS_TRAIN', 'false')
CATEGORY_MAPPING = {'null': 0, 'short': 1, 'long': 2, 'yes': 3, 'no': 4}
def _get_single_answer(example):
    """Pick a single short/long/yes-no answer out of the raw NQ annotations."""
    def choose_first(answer, is_long_answer=False):
        assert isinstance(answer, list)
        if len(answer) == 1:
            answer = answer[0]
            return {k: [answer[k]] for k in answer} if is_long_answer else answer
        for a in answer:
            if is_long_answer:
                a = {k: [a[k]] for k in a}
            if len(a['start_token']) > 0:
                break
        return a

    answer = {'id': example['id']}
    annotation = example['annotations']
    yes_no_answer = annotation['yes_no_answer']
    if 0 in yes_no_answer or 1 in yes_no_answer:
        answer['category'] = ['yes'] if 1 in yes_no_answer else ['no']
        answer['start_token'] = answer['end_token'] = []
        answer['start_byte'] = answer['end_byte'] = []
        answer['text'] = ['<cls>']
    else:
        answer['category'] = ['short']
        out = choose_first(annotation['short_answers'])
        if len(out['start_token']) == 0:
            # answer will be long if short is not available
            answer['category'] = ['long']
            out = choose_first(annotation['long_answer'], is_long_answer=True)
            out['text'] = []
        answer.update(out)

    # disregard some samples
    if len(answer['start_token']) > 1 or answer['start_token'] == answer['end_token']:
        answer['remove_it'] = True
    else:
        answer['remove_it'] = False

    cols = ['start_token', 'end_token', 'start_byte', 'end_byte', 'text']
    if not all(isinstance(answer[k], list) for k in cols):
        raise ValueError('Issue in ID', example['id'])

    return answer
def get_context_and_ans(example, assertion=False):
    """Strip HTML tokens from the document and shift the answer span to match."""
    answer = _get_single_answer(example)
    # bytes are of no use
    del answer['start_byte']
    del answer['end_byte']

    # handle yes_no answers explicitly
    if answer['category'][0] in ['yes', 'no']:  # category is list with one element
        doc = example['document']['tokens']
        context = []
        for i in range(len(doc['token'])):
            if not doc['is_html'][i]:
                context.append(doc['token'][i])
        return {
            'context': ' '.join(context),
            'answer': {
                'start_token': -100,  # ignore index in cross-entropy
                'end_token': -100,  # ignore index in cross-entropy
                'category': answer['category'],
                'span': answer['category'],  # extra
            },
        }

    # later, help in removing all no answers
    if answer['start_token'] == [-1]:
        return {
            'context': 'None',
            'answer': {
                'start_token': -1,
                'end_token': -1,
                'category': 'null',
                'span': 'None',  # extra
            },
        }

    # handling normal samples
    cols = ['start_token', 'end_token']
    answer.update({k: answer[k][0] if len(answer[k]) > 0 else answer[k] for k in cols})  # e.g. [10] == 10

    doc = example['document']['tokens']
    start_token = answer['start_token']
    end_token = answer['end_token']

    context = []
    for i in range(len(doc['token'])):
        if not doc['is_html'][i]:
            context.append(doc['token'][i])
        else:
            if answer['start_token'] > i:
                start_token -= 1
            if answer['end_token'] > i:
                end_token -= 1
    new = ' '.join(context[start_token:end_token])

    # checking above code
    if assertion:
        is_html = doc['is_html'][answer['start_token'] : answer['end_token']]
        old = doc['token'][answer['start_token'] : answer['end_token']]
        old = ' '.join([old[i] for i in range(len(old)) if not is_html[i]])
        if new != old:
            print('ID:', example['id'])
            print('New:', new, end='\n')
            print('Old:', old, end='\n\n')

    return {
        'context': ' '.join(context),
        'answer': {
            'start_token': start_token,
            'end_token': end_token - 1,  # this makes it inclusive
            'category': answer['category'],  # either long or short
            'span': new,  # extra
        },
    }
def UpperCAmelCase_( a__ , a__ , a__=2_048 , a__=4_096 , a__=True ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : Dict = get_context_and_ans(a__ , assertion=a__ )
SCREAMING_SNAKE_CASE : Union[str, Any] = out['''answer''']
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
SCREAMING_SNAKE_CASE : Optional[int] = tokenizer(example['''question''']['''text'''] , out['''context'''] ).input_ids
SCREAMING_SNAKE_CASE : Tuple = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
SCREAMING_SNAKE_CASE : str = []
SCREAMING_SNAKE_CASE : Any = []
SCREAMING_SNAKE_CASE : Union[str, Any] = input_ids[:q_len]
SCREAMING_SNAKE_CASE : Dict = range(a__ , len(a__ ) , max_length - doc_stride )
for i in doc_start_indices:
SCREAMING_SNAKE_CASE : List[str] = i + max_length - q_len
SCREAMING_SNAKE_CASE : Any = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer['''category'''][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(a__ ),
"end_token": [-100] * len(a__ ),
"category": category,
},
}
SCREAMING_SNAKE_CASE : Optional[Any] = out['''context'''].split()
SCREAMING_SNAKE_CASE : str = splitted_context[answer['''end_token''']]
SCREAMING_SNAKE_CASE : Union[str, Any] = len(
tokenizer(
''' '''.join(splitted_context[: answer['''start_token''']] ) , add_special_tokens=a__ , ).input_ids )
SCREAMING_SNAKE_CASE : List[str] = len(
tokenizer(''' '''.join(splitted_context[: answer['''end_token''']] ) , add_special_tokens=a__ ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
SCREAMING_SNAKE_CASE : Dict = len(tokenizer(a__ , add_special_tokens=a__ ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
SCREAMING_SNAKE_CASE : List[str] = input_ids[answer['''start_token'''] : answer['''end_token'''] + 1] # right & left are inclusive
SCREAMING_SNAKE_CASE : int = answer['''start_token''']
SCREAMING_SNAKE_CASE : List[Any] = answer['''end_token''']
if assertion:
SCREAMING_SNAKE_CASE : List[str] = tokenizer.decode(a__ )
if answer["span"] != new:
print('''ISSUE IN TOKENIZATION''' )
print('''OLD:''' , answer['''span'''] )
print('''NEW:''' , a__ , end='''\n\n''' )
if len(a__ ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
SCREAMING_SNAKE_CASE : str = input_ids[:q_len]
SCREAMING_SNAKE_CASE : Optional[Any] = range(a__ , len(a__ ) , max_length - doc_stride )
SCREAMING_SNAKE_CASE : Union[str, Any] = []
SCREAMING_SNAKE_CASE : Optional[int] = []
SCREAMING_SNAKE_CASE : Optional[Any] = []
SCREAMING_SNAKE_CASE : List[str] = [] # null, yes, no, long, short
for i in doc_start_indices:
SCREAMING_SNAKE_CASE : int = i + max_length - q_len
SCREAMING_SNAKE_CASE : List[Any] = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
SCREAMING_SNAKE_CASE : Any = start_token - i + q_len
SCREAMING_SNAKE_CASE : Optional[Any] = end_token - i + q_len
answers_category.append(answer['''category'''][0] ) # ["short"] -> "short"
else:
SCREAMING_SNAKE_CASE : List[Any] = -100
SCREAMING_SNAKE_CASE : Any = -100
answers_category.append('''null''' )
SCREAMING_SNAKE_CASE : Tuple = inputs[-1][start_token : end_token + 1]
answers_start_token.append(a__ )
answers_end_token.append(a__ )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print('''ISSUE in strided for ID:''' , example['''id'''] )
print('''New:''' , tokenizer.decode(a__ ) )
print('''Old:''' , tokenizer.decode(a__ ) , end='''\n\n''' )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
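# --- Hedged sketch (my own simplification; names hypothetical) of the striding
# scheme above: every chunk re-uses the question prefix, and the document part
# advances by (max_length - doc_stride) tokens per chunk, so consecutive
# windows overlap whenever doc_stride exceeds the question length.
def make_windows(q_ids, doc_ids, max_length, doc_stride):
    body = max_length - len(q_ids)  # room left for document tokens per chunk
    step = max_length - doc_stride
    return [q_ids + doc_ids[i : i + body] for i in range(0, len(doc_ids), step)]

# make_windows([101, 102], list(range(8)), max_length=6, doc_stride=4)
# -> [[101, 102, 0, 1, 2, 3], [101, 102, 2, 3, 4, 5],
#     [101, 102, 4, 5, 6, 7], [101, 102, 6, 7]]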
def UpperCAmelCase_( a__ , a__ , a__=2_048 , a__=4_096 , a__=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = get_strided_contexts_and_ans(
a__ , a__ , doc_stride=a__ , max_length=a__ , assertion=a__ , )
return example
def UpperCAmelCase_( a__ , a__ ):
"""simple docstring"""
with jsonlines.open(a__ , '''a''' ) as writer:
for example in tqdm(a__ , total=len(a__ ) , desc='''Saving samples ... ''' ):
SCREAMING_SNAKE_CASE : Union[str, Any] = example['''labels''']
for ids, start, end, cat in zip(
example['''input_ids'''] , labels['''start_token'''] , labels['''end_token'''] , labels['''category'''] , ):
if start == -1 and end == -1:
continue # skip unusable samples that have no answer
if cat == "null" and np.random.rand() < 0.6:
continue # randomly drop ~60% of the "null" samples
writer.write(
{
'''input_ids''': ids,
'''start_token''': start,
'''end_token''': end,
'''category''': CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
a__ : Union[str, Any] = load_dataset('''natural_questions''')
a__ : Any = BigBirdTokenizer.from_pretrained('''google/bigbird-roberta-base''')
a__ : Dict = data['''train''' if PROCESS_TRAIN == '''true''' else '''validation''']
a__ : Tuple = {
'''tokenizer''': tokenizer,
'''doc_stride''': DOC_STRIDE,
'''max_length''': MAX_LENGTH,
'''assertion''': False,
}
a__ : Optional[Any] = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
a__ : Tuple = data.remove_columns(['''annotations''', '''document''', '''id''', '''question'''])
print(data)
np.random.seed(SEED)
a__ : List[str] = '''nq-training.jsonl''' if PROCESS_TRAIN == '''true''' else '''nq-validation.jsonl'''
save_to_disk(data, file_name=cache_file_name)
| 19 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
a__ : Optional[Any] = {'''configuration_deit''': ['''DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''DeiTConfig''', '''DeiTOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Optional[Any] = ['''DeiTFeatureExtractor''']
a__ : Any = ['''DeiTImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : Tuple = [
'''DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''DeiTForImageClassification''',
'''DeiTForImageClassificationWithTeacher''',
'''DeiTForMaskedImageModeling''',
'''DeiTModel''',
'''DeiTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a__ : List[str] = [
'''TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFDeiTForImageClassification''',
'''TFDeiTForImageClassificationWithTeacher''',
'''TFDeiTForMaskedImageModeling''',
'''TFDeiTModel''',
'''TFDeiTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
a__ : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
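# --- Minimal sketch (mine, not HuggingFace's actual _LazyModule) of the lazy
# import pattern used above: attribute access triggers the real submodule
# import, so importing the package stays cheap until a class is first touched.
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported attribute back to the submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)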
| 19 | 1 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
__A = random.Random()
if is_torch_available():
import torch
def lowercase_ ( _lowerCamelCase: Optional[int] , _lowerCamelCase: str=1.0 , _lowerCamelCase: Any=None , _lowerCamelCase: str=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
__lowerCamelCase : Any = global_rng
__lowerCamelCase : List[Any] = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
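# --- Self-contained restatement (mine) of the helper above, assuming its
# obfuscated parameters were (shape, scale=1.0, rng=None, name=None) as in
# similar HuggingFace test utilities: it fills a shape[0] x shape[1] nested
# list with random floats scaled by `scale`.
import random

def floats_list_sketch(shape, scale=1.0, rng=None):
    rng = rng or random.Random()
    return [[rng.random() * scale for _ in range(shape[1])] for _ in range(shape[0])]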
class _snake_case ( unittest.TestCase ):
def __init__( self : Tuple , UpperCAmelCase : Optional[Any] , UpperCAmelCase : int=7 , UpperCAmelCase : str=400 , UpperCAmelCase : List[Any]=2000 , UpperCAmelCase : str=1 , UpperCAmelCase : int=0.0 , UpperCAmelCase : Tuple=16000 , UpperCAmelCase : List[Any]=True , UpperCAmelCase : Dict=True , ):
__lowerCamelCase : Union[str, Any] = parent
__lowerCamelCase : Any = batch_size
__lowerCamelCase : Any = min_seq_length
__lowerCamelCase : int = max_seq_length
__lowerCamelCase : Any = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__lowerCamelCase : str = feature_size
__lowerCamelCase : Optional[int] = padding_value
__lowerCamelCase : Optional[int] = sampling_rate
__lowerCamelCase : List[Any] = return_attention_mask
__lowerCamelCase : List[Any] = do_normalize
def lowerCamelCase__ ( self : Optional[int] ):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def lowerCamelCase__ ( self : Optional[int] , UpperCAmelCase : str=False , UpperCAmelCase : str=False ):
def _flatten(UpperCAmelCase : Union[str, Any] ):
return list(itertools.chain(*UpperCamelCase__ ) )
if equal_length:
__lowerCamelCase : Optional[Any] = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
__lowerCamelCase : str = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
__lowerCamelCase : Any = [np.asarray(UpperCamelCase__ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class _snake_case ( snake_case_ , unittest.TestCase ):
snake_case__ = ASTFeatureExtractor
def lowerCamelCase__ ( self : Tuple ):
__lowerCamelCase : Tuple = ASTFeatureExtractionTester(self )
def lowerCamelCase__ ( self : List[str] ):
__lowerCamelCase : Optional[int] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__lowerCamelCase : Tuple = [floats_list((1, x) )[0] for x in range(800 , 1400 , 200 )]
__lowerCamelCase : Tuple = [np.asarray(UpperCamelCase__ ) for speech_input in speech_inputs]
# Test not batched input
__lowerCamelCase : Union[str, Any] = feat_extract(speech_inputs[0] , return_tensors="np" ).input_values
__lowerCamelCase : Union[str, Any] = feat_extract(np_speech_inputs[0] , return_tensors="np" ).input_values
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )
# Test batched
__lowerCamelCase : List[str] = feat_extract(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="np" ).input_values
__lowerCamelCase : Tuple = feat_extract(UpperCamelCase__ , padding=UpperCamelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__lowerCamelCase : Union[str, Any] = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__lowerCamelCase : List[str] = np.asarray(UpperCamelCase__ )
__lowerCamelCase : Optional[int] = feat_extract(UpperCamelCase__ , return_tensors="np" ).input_values
__lowerCamelCase : Any = feat_extract(UpperCamelCase__ , return_tensors="np" ).input_values
for enc_seq_a, enc_seq_a in zip(UpperCamelCase__ , UpperCamelCase__ ):
self.assertTrue(np.allclose(UpperCamelCase__ , UpperCamelCase__ , atol=1E-3 ) )
@require_torch
def lowerCamelCase__ ( self : List[str] ):
import torch
__lowerCamelCase : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__lowerCamelCase : Optional[int] = np.random.rand(100 ).astype(np.floataa )
__lowerCamelCase : Dict = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__lowerCamelCase : Union[str, Any] = feature_extractor.pad([{"input_values": inputs}] , return_tensors="np" )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
__lowerCamelCase : Tuple = feature_extractor.pad([{"input_values": inputs}] , return_tensors="pt" )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
def lowerCamelCase__ ( self : int , UpperCAmelCase : Dict ):
from datasets import load_dataset
__lowerCamelCase : Optional[Any] = load_dataset("hf-internal-testing/librispeech_asr_dummy" , "clean" , split="validation" )
# automatic decoding with librispeech
__lowerCamelCase : Optional[int] = ds.sort("id" ).select(range(UpperCamelCase__ ) )[:num_samples]["audio"]
return [x["array"] for x in speech_samples]
@require_torch
def lowerCamelCase__ ( self : Tuple ):
# fmt: off
__lowerCamelCase : List[str] = torch.tensor(
[-0.9_8_9_4, -1.2_7_7_6, -0.9_0_6_6, -1.2_7_7_6, -0.9_3_4_9, -1.2_6_0_9, -1.0_3_8_6, -1.2_7_7_6,
-1.1_5_6_1, -1.2_7_7_6, -1.2_0_5_2, -1.2_7_2_3, -1.2_1_9_0, -1.2_1_3_2, -1.2_7_7_6, -1.1_1_3_3,
-1.1_9_5_3, -1.1_3_4_3, -1.1_5_8_4, -1.2_2_0_3, -1.1_7_7_0, -1.2_4_7_4, -1.2_3_8_1, -1.1_9_3_6,
-0.9_2_7_0, -0.8_3_1_7, -0.8_0_4_9, -0.7_7_0_6, -0.7_5_6_5, -0.7_8_6_9] )
# fmt: on
__lowerCamelCase : Dict = self._load_datasamples(1 )
__lowerCamelCase : Union[str, Any] = ASTFeatureExtractor()
__lowerCamelCase : Optional[Any] = feature_extractor(UpperCamelCase__ , return_tensors="pt" ).input_values
self.assertEquals(input_values.shape , (1, 1024, 128) )
self.assertTrue(torch.allclose(input_values[0, 0, :30] , UpperCamelCase__ , atol=1E-4 ) ) | 135 |
"""simple docstring"""
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__snake_case = logging.getLogger(__name__)
class _lowerCAmelCase ( snake_case_ ):
__UpperCAmelCase : Optional[int] = '''token-classification'''
def __init__( self , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
if type(UpperCamelCase__ ) == dict:
snake_case : Optional[int] = Namespace(**UpperCamelCase__ )
snake_case : Optional[int] = import_module("tasks" )
try:
snake_case : Optional[int] = getattr(UpperCamelCase__ , hparams.task_type )
snake_case : TokenClassificationTask = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F'Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. '
F'Available tasks classes are: {TokenClassificationTask.__subclasses__()}' )
snake_case : str = self.token_classification_task.get_labels(hparams.labels )
snake_case : Union[str, Any] = CrossEntropyLoss().ignore_index
super().__init__(UpperCamelCase__ , len(self.labels ) , self.mode )
def lowerCamelCase ( self , **UpperCamelCase__ ) -> Any:
'''simple docstring'''
return self.model(**UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Tuple:
'''simple docstring'''
snake_case : Optional[Any] = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
snake_case : Optional[int] = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don't use token_type_ids
snake_case : List[Any] = self(**UpperCamelCase__ )
snake_case : Union[str, Any] = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def lowerCamelCase ( self ) -> Optional[int]:
'''simple docstring'''
snake_case : Optional[Any] = self.hparams
for mode in ["train", "dev", "test"]:
snake_case : Optional[Any] = self._feature_file(UpperCamelCase__ )
if os.path.exists(UpperCamelCase__ ) and not args.overwrite_cache:
logger.info("Loading features from cached file %s" , UpperCamelCase__ )
snake_case : List[str] = torch.load(UpperCamelCase__ )
else:
logger.info("Creating features from dataset file at %s" , args.data_dir )
snake_case : Optional[Any] = self.token_classification_task.read_examples_from_file(args.data_dir , UpperCamelCase__ )
snake_case : Dict = self.token_classification_task.convert_examples_to_features(
UpperCamelCase__ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ["xlnet"] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=UpperCamelCase__ , pad_on_left=bool(self.config.model_type in ["xlnet"] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info("Saving features into cached file %s" , UpperCamelCase__ )
torch.save(UpperCamelCase__ , UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = False ) -> DataLoader:
'''simple docstring'''
snake_case : Optional[Any] = self._feature_file(UpperCamelCase__ )
logger.info("Loading features from cached file %s" , UpperCamelCase__ )
snake_case : Any = torch.load(UpperCamelCase__ )
snake_case : Tuple = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
snake_case : List[str] = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
snake_case : Tuple = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
snake_case : Union[str, Any] = torch.tensor([0 for f in features] , dtype=torch.long )
# HACK: this will not be used for much longer
snake_case : Optional[int] = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ) , batch_size=UpperCamelCase__ )
def lowerCamelCase ( self , UpperCamelCase__ , UpperCamelCase__ ) -> Optional[Any]:
'''simple docstring'''
"""Compute validation""" ""
snake_case : Any = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]}
if self.config.model_type != "distilbert":
snake_case : Optional[int] = (
batch[2] if self.config.model_type in ["bert", "xlnet"] else None
) # XLM and RoBERTa don't use token_type_ids
snake_case : Optional[int] = self(**UpperCamelCase__ )
snake_case ,snake_case : str = outputs[:2]
snake_case : Optional[int] = logits.detach().cpu().numpy()
snake_case : str = inputs["labels"].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def lowerCamelCase ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
snake_case : Dict = torch.stack([x["val_loss"] for x in outputs] ).mean()
snake_case : List[str] = np.concatenate([x["pred"] for x in outputs] , axis=0 )
snake_case : Any = np.argmax(UpperCamelCase__ , axis=2 )
snake_case : Dict = np.concatenate([x["target"] for x in outputs] , axis=0 )
snake_case : Tuple = dict(enumerate(self.labels ) )
snake_case : str = [[] for _ in range(out_label_ids.shape[0] )]
snake_case : List[Any] = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
snake_case : Union[str, Any] = {
"val_loss": val_loss_mean,
"accuracy_score": accuracy_score(UpperCamelCase__ , UpperCamelCase__ ),
"precision": precision_score(UpperCamelCase__ , UpperCamelCase__ ),
"recall": recall_score(UpperCamelCase__ , UpperCamelCase__ ),
"f1": fa_score(UpperCamelCase__ , UpperCamelCase__ ),
}
snake_case : int = dict(results.items() )
snake_case : Union[str, Any] = results
return ret, preds_list, out_label_list
def lowerCamelCase ( self , UpperCamelCase__ ) -> List[str]:
'''simple docstring'''
snake_case ,snake_case ,snake_case : Optional[Any] = self._eval_end(UpperCamelCase__ )
snake_case : Tuple = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def lowerCamelCase ( self , UpperCamelCase__ ) -> Any:
'''simple docstring'''
snake_case ,snake_case ,snake_case : List[Any] = self._eval_end(UpperCamelCase__ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
snake_case : Optional[Any] = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ) -> str:
'''simple docstring'''
BaseTransformer.add_model_specific_args(UpperCamelCase__ , UpperCamelCase__ )
parser.add_argument(
"--task_type" , default="NER" , type=UpperCamelCase__ , help="Task type to fine tune in training (e.g. NER, POS, etc)" )
parser.add_argument(
"--max_seq_length" , default=128 , type=UpperCamelCase__ , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--labels" , default="" , type=UpperCamelCase__ , help="Path to a file containing all labels. If not specified, CoNLL-2003 labels are used." , )
parser.add_argument(
"--gpus" , default=0 , type=UpperCamelCase__ , help="The number of GPUs allocated for this, it is by default 0 meaning none" , )
parser.add_argument(
"--overwrite_cache" , action="store_true" , help="Overwrite the cached training and evaluation sets" )
return parser
if __name__ == "__main__":
__snake_case = argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__snake_case = NERTransformer.add_model_specific_args(parser, os.getcwd())
__snake_case = parser.parse_args()
__snake_case = NERTransformer(args)
__snake_case = generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__snake_case = sorted(glob.glob(os.path.join(args.output_dir, """checkpoint-epoch=*.ckpt"""), recursive=True))
__snake_case = model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
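# --- Hedged sketch (mine, hypothetical names) of the realignment performed in
# the eval step above: positions whose gold label id equals the
# CrossEntropyLoss ignore index (padding / subword pieces) are skipped when
# rebuilding the tag sequences that seqeval scores.
import numpy as np

def align_predictions(logits, label_ids, label_map, ignore_index=-100):
    preds = np.argmax(logits, axis=2)  # (batch, seq_len)
    pred_tags, gold_tags = [], []
    for pred_row, gold_row in zip(preds, label_ids):
        p, g = [], []
        for pred_id, gold_id in zip(pred_row, gold_row):
            if gold_id != ignore_index:
                p.append(label_map[pred_id])
                g.append(label_map[gold_id])
        pred_tags.append(p)
        gold_tags.append(g)
    return pred_tags, gold_tags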
| 203 | 0 |
"""simple docstring"""
import inspect
import re
from hashlib import shaaaa
from typing import Dict, List
from .arrow import arrow
from .audiofolder import audiofolder
from .csv import csv
from .imagefolder import imagefolder
from .json import json
from .pandas import pandas
from .parquet import parquet
from .sql import sql # noqa F401
from .text import text
def lowerCAmelCase__ ( _UpperCamelCase : List[str] ) -> str:
"""simple docstring"""
snake_case = []
for line in lines:
snake_case = re.sub(r'#.*' , '' , _UpperCamelCase ) # remove comments
if line:
filtered_lines.append(_UpperCamelCase )
snake_case = '\n'.join(_UpperCamelCase )
# Make a hash from all this code
snake_case = full_str.encode('utf-8' )
return shaaaa(_UpperCamelCase ).hexdigest()
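# --- Quick property check (mine): because full-line comments are filtered out
# before hashing, a header comment does not change the cache key, e.g. the
# hash of ["# header", "x = 1"] equals the hash of ["x = 1"]. Inline comments
# leave their preceding whitespace behind, so they can still perturb the hash.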
# get importable module names and hash for caching
SCREAMING_SNAKE_CASE__ = {
"csv": (csv.__name__, _hash_python_lines(inspect.getsource(csv).splitlines())),
"json": (json.__name__, _hash_python_lines(inspect.getsource(json).splitlines())),
"pandas": (pandas.__name__, _hash_python_lines(inspect.getsource(pandas).splitlines())),
"parquet": (parquet.__name__, _hash_python_lines(inspect.getsource(parquet).splitlines())),
"arrow": (arrow.__name__, _hash_python_lines(inspect.getsource(arrow).splitlines())),
"text": (text.__name__, _hash_python_lines(inspect.getsource(text).splitlines())),
"imagefolder": (imagefolder.__name__, _hash_python_lines(inspect.getsource(imagefolder).splitlines())),
"audiofolder": (audiofolder.__name__, _hash_python_lines(inspect.getsource(audiofolder).splitlines())),
}
# Used to infer the module to use based on the data files extensions
SCREAMING_SNAKE_CASE__ = {
".csv": ("csv", {}),
".tsv": ("csv", {"sep": "\t"}),
".json": ("json", {}),
".jsonl": ("json", {}),
".parquet": ("parquet", {}),
".arrow": ("arrow", {}),
".txt": ("text", {}),
}
_EXTENSION_TO_MODULE.update({ext: ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("imagefolder", {}) for ext in imagefolder.ImageFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext: ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
_EXTENSION_TO_MODULE.update({ext.upper(): ("audiofolder", {}) for ext in audiofolder.AudioFolder.EXTENSIONS})
SCREAMING_SNAKE_CASE__ = {"imagefolder", "audiofolder"}
# Used to filter data files based on extensions given a module name
SCREAMING_SNAKE_CASE__ = {}
for _ext, (_module, _) in _EXTENSION_TO_MODULE.items():
_MODULE_TO_EXTENSIONS.setdefault(_module, []).append(_ext)
_MODULE_TO_EXTENSIONS["imagefolder"].append(".zip")
_MODULE_TO_EXTENSIONS["audiofolder"].append(".zip")
| 149 | """simple docstring"""
import os
import sys
import unittest
SCREAMING_SNAKE_CASE__ = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import check_dummies # noqa: E402
from check_dummies import create_dummy_files, create_dummy_object, find_backend, read_init # noqa: E402
# Align TRANSFORMERS_PATH in check_dummies with the current path
SCREAMING_SNAKE_CASE__ = os.path.join(git_repo_path, "src", "transformers")
SCREAMING_SNAKE_CASE__ = "\n{0} = None\n"
SCREAMING_SNAKE_CASE__ = "\nclass {0}(metaclass=DummyObject):\n _backends = {1}\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, {1})\n"
SCREAMING_SNAKE_CASE__ = "\ndef {0}(*args, **kwargs):\n requires_backends({0}, {1})\n"
class lowerCAmelCase_ ( unittest.TestCase ):
"""simple docstring"""
def snake_case ( self ):
"""simple docstring"""
snake_case = find_backend(' _import_structure["models.albert"].append("AlbertTokenizerFast")' )
self.assertIsNone(lowerCAmelCase )
snake_case = find_backend(' if not is_tokenizers_available():' )
self.assertEqual(lowerCAmelCase , 'tokenizers' )
snake_case = find_backend(' if not is_tensorflow_text_available():' )
self.assertEqual(lowerCAmelCase , 'tensorflow_text' )
snake_case = find_backend(' if not (is_sentencepiece_available() and is_tokenizers_available()):' )
self.assertEqual(lowerCAmelCase , 'sentencepiece_and_tokenizers' )
snake_case = find_backend(
' if not (is_sentencepiece_available() and is_tensorflow_text_available()):' )
self.assertEqual(lowerCAmelCase , 'sentencepiece_and_tensorflow_text' )
snake_case = find_backend(
' if not (is_sentencepiece_available() and is_tokenizers_available() and is_vision_available()):' )
self.assertEqual(lowerCAmelCase , 'sentencepiece_and_tokenizers_and_vision' )
def snake_case ( self ):
"""simple docstring"""
snake_case = read_init()
# We don't assert on the exact list of keys to allow for smooth grow of backend-specific objects
self.assertIn('torch' , lowerCAmelCase )
self.assertIn('tensorflow_text' , lowerCAmelCase )
self.assertIn('sentencepiece_and_tokenizers' , lowerCAmelCase )
# Likewise, we can't assert on the exact content of a key
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertModel' , objects['tf'] )
self.assertIn('FlaxBertModel' , objects['flax'] )
self.assertIn('BertModel' , objects['torch'] )
self.assertIn('TFBertTokenizer' , objects['tensorflow_text'] )
self.assertIn('convert_slow_tokenizer' , objects['sentencepiece_and_tokenizers'] )
def snake_case ( self ):
"""simple docstring"""
snake_case = create_dummy_object('CONSTANT' , '\'torch\'' )
self.assertEqual(lowerCAmelCase , '\nCONSTANT = None\n' )
snake_case = create_dummy_object('function' , '\'torch\'' )
self.assertEqual(
lowerCAmelCase , '\ndef function(*args, **kwargs):\n requires_backends(function, \'torch\')\n' )
snake_case = '\nclass FakeClass(metaclass=DummyObject):\n _backends = \'torch\'\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, \'torch\')\n'
snake_case = create_dummy_object('FakeClass' , '\'torch\'' )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
def snake_case ( self ):
"""simple docstring"""
snake_case = '# This file is autogenerated by the command `make fix-copies`, do not edit.\nfrom ..utils import DummyObject, requires_backends\n\n\nCONSTANT = None\n\n\ndef function(*args, **kwargs):\n requires_backends(function, ["torch"])\n\n\nclass FakeClass(metaclass=DummyObject):\n _backends = ["torch"]\n\n def __init__(self, *args, **kwargs):\n requires_backends(self, ["torch"])\n'
snake_case = create_dummy_files({'torch': ['CONSTANT', 'function', 'FakeClass']} )
self.assertEqual(dummy_files['torch'] , lowerCAmelCase )
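# --- Hedged sketch (mine, not the real check_dummies implementation) of a
# find_backend consistent with the expectations tested above: it extracts
# backend names from `if not is_xxx_available():` guard lines and joins
# compound guards with "_and_".
import re

_re_backend_sketch = re.compile(r"is_(\w+)_available")

def find_backend_sketch(line):
    if "if not" not in line or "_available" not in line:
        return None
    found = _re_backend_sketch.findall(line)
    return "_and_".join(found) if found else None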
| 149 | 1 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
snake_case_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ (__snake_case ):
def __init__( self , *a , **a):
warnings.warn(
'The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use CLIPImageProcessor instead.' , a , )
super().__init__(*a , **a)
| 214 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
snake_case_ = logging.get_logger(__name__)
class SCREAMING_SNAKE_CASE__ (__snake_case ):
def __init__( self , *a , **a):
warnings.warn(
'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use FlavaImageProcessor instead.' , a , )
super().__init__(*a , **a)
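# --- Generic sketch (mine, hypothetical class names) of the deprecation-shim
# pattern used by the two classes above: the old name keeps working but emits
# a warning and otherwise defers entirely to its replacement.
import warnings

class NewProcessor:  # stand-in for the replacement class
    pass

class OldProcessor(NewProcessor):  # stand-in for the deprecated alias
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "OldProcessor is deprecated; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)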
| 214 | 1 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class __A :
def __init__( self : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int]=14 , UpperCAmelCase_ : Any=7 , UpperCAmelCase_ : int=True , UpperCAmelCase_ : Optional[int]=True , UpperCAmelCase_ : Optional[int]=False , UpperCAmelCase_ : Union[str, Any]=True , UpperCAmelCase_ : Optional[Any]=99 , UpperCAmelCase_ : str=32 , UpperCAmelCase_ : Union[str, Any]=4 , UpperCAmelCase_ : Tuple=4 , UpperCAmelCase_ : int=4 , UpperCAmelCase_ : str=37 , UpperCAmelCase_ : Optional[Any]="gelu" , UpperCAmelCase_ : Union[str, Any]=0.1 , UpperCAmelCase_ : Optional[Any]=0.1 , UpperCAmelCase_ : Optional[Any]=512 , UpperCAmelCase_ : List[str]=0.02 , ):
lowerCAmelCase : Dict = parent
lowerCAmelCase : Optional[Any] = batch_size
lowerCAmelCase : List[Any] = seq_length
lowerCAmelCase : int = is_training
lowerCAmelCase : str = use_input_mask
lowerCAmelCase : int = use_token_type_ids
lowerCAmelCase : List[Any] = use_labels
lowerCAmelCase : str = vocab_size
lowerCAmelCase : Optional[Any] = hidden_size
lowerCAmelCase : Dict = rotary_dim
lowerCAmelCase : int = num_hidden_layers
lowerCAmelCase : Optional[int] = num_attention_heads
lowerCAmelCase : Optional[int] = intermediate_size
lowerCAmelCase : Any = hidden_act
lowerCAmelCase : Tuple = hidden_dropout_prob
lowerCAmelCase : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase : int = max_position_embeddings
lowerCAmelCase : Dict = initializer_range
lowerCAmelCase : int = None
lowerCAmelCase : List[Any] = vocab_size - 1
lowerCAmelCase : Dict = vocab_size - 1
lowerCAmelCase : Union[str, Any] = vocab_size - 1
def lowercase__ ( self : str ):
lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase : Optional[Any] = None
if self.use_input_mask:
lowerCAmelCase : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase : Dict = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=UpperCAmelCase_ , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def lowercase__ ( self : Dict ):
lowerCAmelCase : int = self.prepare_config_and_inputs()
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = config_and_inputs
lowerCAmelCase : List[Any] = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def lowercase__ ( self : Union[str, Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Tuple ):
lowerCAmelCase : Optional[int] = 20
lowerCAmelCase : List[Any] = model_class_name(UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
lowerCAmelCase : Union[str, Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCAmelCase : Optional[int] = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
lowerCAmelCase : Tuple = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
lowerCAmelCase : List[Any] = model(
input_ids[:, -1:] , attention_mask=UpperCAmelCase_ , past_key_values=outputs_cache.past_key_values , position_ids=UpperCAmelCase_ , )
lowerCAmelCase : List[Any] = model(UpperCAmelCase_ )
lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def lowercase__ ( self : Any , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : Union[str, Any] ):
lowerCAmelCase : List[str] = 20
lowerCAmelCase : Any = model_class_name(UpperCAmelCase_ )
lowerCAmelCase : Tuple = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
lowerCAmelCase : Optional[Any] = model.init_cache(input_ids.shape[0] , UpperCAmelCase_ )
lowerCAmelCase : List[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
lowerCAmelCase : Any = model(
input_ids[:, :-1] , attention_mask=UpperCAmelCase_ , past_key_values=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
lowerCAmelCase : int = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
lowerCAmelCase : Dict = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=UpperCAmelCase_ , position_ids=UpperCAmelCase_ , )
lowerCAmelCase : Union[str, Any] = model(UpperCAmelCase_ , attention_mask=UpperCAmelCase_ )
lowerCAmelCase : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class __A ( lowerCAmelCase , lowerCAmelCase , unittest.TestCase ):
lowerCAmelCase_ : Union[str, Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowerCAmelCase_ : Tuple = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def lowercase__ ( self : List[Any] ):
lowerCAmelCase : Tuple = FlaxGPTJModelTester(self )
def lowercase__ ( self : int ):
for model_class_name in self.all_model_classes:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
def lowercase__ ( self : Optional[Any] ):
for model_class_name in self.all_model_classes:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
@tooslow
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
lowerCAmelCase : int = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ )
lowerCAmelCase : Tuple = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
lowerCAmelCase : List[str] = False
lowerCAmelCase : List[str] = model.config.eos_token_id
lowerCAmelCase : Optional[Any] = jax.jit(model.generate )
lowerCAmelCase : List[str] = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
lowerCAmelCase : Optional[Any] = tokenizer.batch_decode(UpperCAmelCase_ , skip_special_tokens=UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(UpperCAmelCase_ , UpperCAmelCase_ )
@is_pt_flax_cross_test
def lowercase__ ( self : Optional[int] ):
lowerCAmelCase , lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCAmelCase : Optional[Any] = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCAmelCase : Any = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCAmelCase : Union[str, Any] = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase , lowerCAmelCase : Dict = pt_inputs['input_ids'].shape
lowerCAmelCase : Tuple = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase_ ):
lowerCAmelCase : Any = 0
lowerCAmelCase : List[Any] = 1
lowerCAmelCase : List[str] = 0
lowerCAmelCase : int = 1
lowerCAmelCase : Optional[Any] = pt_model_class(UpperCAmelCase_ ).eval()
lowerCAmelCase : int = model_class(UpperCAmelCase_ , dtype=jnp.floataa )
lowerCAmelCase : List[str] = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , UpperCAmelCase_ )
lowerCAmelCase : Optional[Any] = fx_state
with torch.no_grad():
lowerCAmelCase : List[str] = pt_model(**UpperCAmelCase_ ).to_tuple()
lowerCAmelCase : Optional[int] = fx_model(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[str] = model_class.from_pretrained(UpperCAmelCase_ , from_pt=UpperCAmelCase_ )
lowerCAmelCase : str = fx_model_loaded(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def lowercase__ ( self : List[str] ):
lowerCAmelCase , lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
lowerCAmelCase : List[str] = self._prepare_for_class(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
lowerCAmelCase : Optional[int] = model_class.__name__[4:] # Skip the "Flax" at the beginning
lowerCAmelCase : str = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
lowerCAmelCase : Union[str, Any] = pt_model_class(UpperCAmelCase_ ).eval()
lowerCAmelCase : Optional[int] = model_class(UpperCAmelCase_ , dtype=jnp.floataa )
lowerCAmelCase : Dict = load_flax_weights_in_pytorch_model(UpperCAmelCase_ , fx_model.params )
lowerCAmelCase , lowerCAmelCase : List[str] = pt_inputs['input_ids'].shape
lowerCAmelCase : Any = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(UpperCAmelCase_ ):
lowerCAmelCase : List[str] = 0
lowerCAmelCase : Optional[Any] = 1
lowerCAmelCase : List[Any] = 0
lowerCAmelCase : Union[str, Any] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
lowerCAmelCase : List[Any] = pt_model(**UpperCAmelCase_ ).to_tuple()
lowerCAmelCase : int = fx_model(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(UpperCAmelCase_ )
lowerCAmelCase : List[str] = pt_model_class.from_pretrained(UpperCAmelCase_ , from_flax=UpperCAmelCase_ )
with torch.no_grad():
lowerCAmelCase : Tuple = pt_model_loaded(**UpperCAmelCase_ ).to_tuple()
self.assertEqual(
len(UpperCAmelCase_ ) , len(UpperCAmelCase_ ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(UpperCAmelCase_ , UpperCAmelCase_ ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def lowercase__ ( self : Dict ):
for model_class_name in self.all_model_classes:
lowerCAmelCase : str = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
lowerCAmelCase : List[str] = model(np.ones((1, 1) ) )
self.assertIsNotNone(UpperCAmelCase_ )
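# --- Tiny runnable illustration (mine, numpy standing in for jax.numpy) of
# the position-id broadcast used by the cache tests above: one ascending
# position per prefix token, repeated across the batch dimension.
import numpy as np

input_ids_demo = np.zeros((2, 5), dtype="i4")  # (batch, seq_len)
position_ids_demo = np.broadcast_to(
    np.arange(input_ids_demo.shape[-1] - 1)[None, :],
    (input_ids_demo.shape[0], input_ids_demo.shape[-1] - 1),
)
# position_ids_demo -> [[0, 1, 2, 3], [0, 1, 2, 3]]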
| 323 |
from __future__ import annotations
from typing import Any
class __A :
def __init__( self : Optional[Any] , UpperCAmelCase_ : int ):
lowerCAmelCase : Tuple = num_of_nodes
lowerCAmelCase : list[list[int]] = []
lowerCAmelCase : dict[int, int] = {}
def lowercase__ ( self : List[str] , UpperCAmelCase_ : int , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Dict , UpperCAmelCase_ : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[int] , UpperCAmelCase_ : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowerCAmelCase : Dict = self.find_component(UpperCAmelCase_ )
def lowercase__ ( self : List[str] , UpperCAmelCase_ : list[int] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
if component_size[u_node] <= component_size[v_node]:
lowerCAmelCase : Optional[int] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCAmelCase_ )
elif component_size[u_node] >= component_size[v_node]:
lowerCAmelCase : Union[str, Any] = self.find_component(UpperCAmelCase_ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCAmelCase_ )
def lowercase__ ( self : str ):
lowerCAmelCase : str = []
lowerCAmelCase : Tuple = 0
lowerCAmelCase : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowerCAmelCase : int = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Union[str, Any] = edge
lowerCAmelCase : Optional[int] = self.m_component[u]
lowerCAmelCase : str = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowerCAmelCase : str = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase : Any = edge
lowerCAmelCase : Optional[Any] = self.m_component[u]
lowerCAmelCase : Optional[Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
num_of_components -= 1
lowerCAmelCase : Optional[Any] = [-1] * self.m_num_of_nodes
print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
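# --- Usage sketch (mine), assuming the original method names were
# add_edge(u, v, w) and boruvka() before they were renamed above:
#   g = GraphUndirectedWeighted(4)
#   for u, v, w in [(0, 1, 1), (0, 2, 2), (2, 3, 1), (0, 3, 3)]:
#       g.add_edge(u, v, w)
#   g.boruvka()  # prints each edge added to the MST and a total weight of 4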
| 323 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class UpperCAmelCase_ ( snake_case ):
UpperCamelCase =["image_processor", "tokenizer"]
UpperCamelCase ="FlavaImageProcessor"
UpperCamelCase =("BertTokenizer", "BertTokenizerFast")
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , **UpperCamelCase_ ) -> Union[str, Any]:
__lowercase : Optional[int] = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , UpperCAmelCase_ , )
__lowercase : Tuple = kwargs.pop('''feature_extractor''' )
__lowercase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
__lowercase : List[str] = self.image_processor
def __call__( self , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = True , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = None , UpperCamelCase_ = 0 , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = None , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = False , UpperCamelCase_ = True , UpperCamelCase_ = None , **UpperCamelCase_ , ) -> Dict:
if text is None and images is None:
raise ValueError('''You have to specify either text or images. Both cannot be none.''' )
if text is not None:
__lowercase : Optional[Any] = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
if images is not None:
__lowercase : Any = self.image_processor(
UpperCAmelCase_ , return_image_mask=UpperCAmelCase_ , return_codebook_pixels=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
if text is not None and images is not None:
encoding.update(UpperCAmelCase_ )
return encoding
elif text is not None:
return encoding
else:
return BatchEncoding(data=dict(**UpperCAmelCase_ ) , tensor_type=UpperCAmelCase_ )
def _lowerCamelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> List[str]:
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def _lowerCamelCase ( self , *UpperCamelCase_ , **UpperCamelCase_ ) -> str:
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Any = self.tokenizer.model_input_names
__lowercase : Dict = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _lowerCamelCase ( self ) -> Optional[int]:
warnings.warn(
'''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''' , UpperCAmelCase_ , )
return self.image_processor_class
@property
def _lowerCamelCase ( self ) -> List[str]:
warnings.warn(
'''`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.''' , UpperCAmelCase_ , )
return self.image_processor
| 249 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, Generic, TypeVar
__A : Any = TypeVar('''T''')
class __A ( Generic[T] ):
def __init__( self : Dict , UpperCAmelCase_ : list[T] , UpperCAmelCase_ : Callable[[T, T], T] ):
lowerCAmelCase : Any | T = None
lowerCAmelCase : int = len(UpperCAmelCase_ )
lowerCAmelCase : list[T] = [any_type for _ in range(self.N )] + arr
lowerCAmelCase : List[Any] = fnc
self.build()
def lowercase__ ( self : str ):
for p in range(self.N - 1 , 0 , -1 ):
lowerCAmelCase : Optional[Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowercase__ ( self : int , UpperCAmelCase_ : int , UpperCAmelCase_ : T ):
p += self.N
lowerCAmelCase : int = v
while p > 1:
lowerCAmelCase : List[Any] = p // 2
lowerCAmelCase : List[Any] = self.fn(self.st[p * 2] , self.st[p * 2 + 1] )
def lowercase__ ( self : Optional[Any] , UpperCAmelCase_ : int , UpperCAmelCase_ : int ): # noqa: E741
lowerCAmelCase , lowerCAmelCase : str = l + self.N, r + self.N
lowerCAmelCase : T | None = None
while l <= r:
if l % 2 == 1:
lowerCAmelCase : Any = self.st[l] if res is None else self.fn(UpperCAmelCase_ , self.st[l] )
if r % 2 == 0:
lowerCAmelCase : Optional[int] = self.st[r] if res is None else self.fn(UpperCAmelCase_ , self.st[r] )
lowerCAmelCase , lowerCAmelCase : Optional[Any] = (l + 1) // 2, (r - 1) // 2
return res
if __name__ == "__main__":
from functools import reduce
__A : str = [1, 10, -2, 9, -3, 8, 4, -7, 5, 6, 11, -12]
__A : List[Any] = {
0: 7,
1: 2,
2: 6,
3: -14,
4: 5,
5: 4,
6: 7,
7: -10,
8: 9,
9: 10,
10: 12,
11: 1,
}
__A : Optional[int] = SegmentTree(test_array, min)
__A : Optional[int] = SegmentTree(test_array, max)
__A : Dict = SegmentTree(test_array, lambda a, b: a + b)
def SCREAMING_SNAKE_CASE__ ( ) -> None:
'''simple docstring'''
for i in range(len(_UpperCAmelCase ) ):
for j in range(_UpperCAmelCase, len(_UpperCAmelCase ) ):
lowerCAmelCase : str = reduce(_UpperCAmelCase, test_array[i : j + 1] )
lowerCAmelCase : Dict = reduce(_UpperCAmelCase, test_array[i : j + 1] )
lowerCAmelCase : str = reduce(lambda _UpperCAmelCase, _UpperCAmelCase : a + b, test_array[i : j + 1] )
assert min_range == min_segment_tree.query(_UpperCAmelCase, _UpperCAmelCase )
assert max_range == max_segment_tree.query(_UpperCAmelCase, _UpperCAmelCase )
assert sum_range == sum_segment_tree.query(_UpperCAmelCase, _UpperCAmelCase )
test_all_segments()
for index, value in test_updates.items():
__A : int = value
min_segment_tree.update(index, value)
max_segment_tree.update(index, value)
sum_segment_tree.update(index, value)
test_all_segments()
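# --- Worked micro-check (mine), assuming the methods above were originally
# named update(p, v) and query(l, r) with inclusive bounds:
#   st = SegmentTree([1, 10, -2, 9], min)
#   st.query(1, 2)     # -> -2  (minimum over positions 1..2)
#   st.update(2, 100)  # leaf 2 becomes 100; its ancestors are recomputed
#   st.query(1, 2)     # -> 10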
| 138 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__lowerCAmelCase : int ={
"configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Union[str, Any] =["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : List[str] =[
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : List[str] =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 361 |
'''simple docstring'''
import qiskit
def UpperCamelCase ( _lowerCamelCase : int , _lowerCamelCase : int ):
A__ = qiskit.Aer.get_backend("aer_simulator" )
# Create a Quantum Circuit acting on the q register
A__ = qiskit.QuantumCircuit(_lowerCamelCase , _lowerCamelCase )
# Map the quantum measurement to the classical bits
circuit.measure([0] , [0] )
# Execute the circuit on the simulator
A__ = qiskit.execute(_lowerCamelCase , _lowerCamelCase , shots=10_00 )
# Return the histogram data of the results of the experiment.
return job.result().get_counts(_lowerCamelCase )
if __name__ == "__main__":
print(f"""Total count for various states are: {single_qubit_measure(1, 1)}""")
| 123 | 0 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
A: str = logging.get_logger(__name__)
A: Dict = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all BART models at https://huggingface.co/models?filter=bart
A: int = {
"vocab_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/vocab.json",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/vocab.json",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json",
},
"merges_file": {
"facebook/bart-base": "https://huggingface.co/facebook/bart-base/resolve/main/merges.txt",
"facebook/bart-large": "https://huggingface.co/facebook/bart-large/resolve/main/merges.txt",
"facebook/bart-large-mnli": "https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt",
"facebook/bart-large-cnn": "https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt",
"facebook/bart-large-xsum": "https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt",
"yjernite/bart_eli5": "https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt",
},
}
A: str = {
"facebook/bart-base": 1_0_2_4,
"facebook/bart-large": 1_0_2_4,
"facebook/bart-large-mnli": 1_0_2_4,
"facebook/bart-large-cnn": 1_0_2_4,
"facebook/bart-large-xsum": 1_0_2_4,
"yjernite/bart_eli5": 1_0_2_4,
}
@lru_cache()
def _snake_case ( ):
UpperCAmelCase : List[Any] = (
list(range(ord("""!""" ) , ord("""~""" ) + 1 ) ) + list(range(ord("""¡""" ) , ord("""¬""" ) + 1 ) ) + list(range(ord("""®""" ) , ord("""ÿ""" ) + 1 ) )
)
UpperCAmelCase : List[str] = bs[:]
UpperCAmelCase : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(UpperCamelCase )
cs.append(2**8 + n )
n += 1
UpperCAmelCase : Tuple = [chr(UpperCamelCase ) for n in cs]
return dict(zip(UpperCamelCase , UpperCamelCase ) )
def _snake_case ( UpperCamelCase : Dict ):
UpperCAmelCase : Union[str, Any] = set()
UpperCAmelCase : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
UpperCAmelCase : Any = char
return pairs
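# --- Worked micro-example (mine) for the helper above: BPE ranks adjacent
# symbol bigrams, so get_pairs(("l", "o", "w")) returns
# {("l", "o"), ("o", "w")}, and the merge loop below repeatedly fuses the
# lowest-ranked pair until none of the remaining bigrams appear in the table.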
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase__ ):
__lowerCAmelCase : List[str] = VOCAB_FILES_NAMES
__lowerCAmelCase : Any = PRETRAINED_VOCAB_FILES_MAP
__lowerCAmelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCAmelCase : int = ['input_ids', 'attention_mask']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="replace" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : List[Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else bos_token
UpperCAmelCase : List[Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else eos_token
UpperCAmelCase : Optional[int] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else sep_token
UpperCAmelCase : Union[str, Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else cls_token
UpperCAmelCase : Union[str, Any] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else unk_token
UpperCAmelCase : Tuple = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else pad_token
# Mask token behaves like a normal word, i.e. includes the space before it
UpperCAmelCase : List[str] = AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else mask_token
super().__init__(
errors=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as vocab_handle:
UpperCAmelCase : str = json.load(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[Any] = {v: k for k, v in self.encoder.items()}
UpperCAmelCase : int = errors # how to handle errors in decoding
UpperCAmelCase : str = bytes_to_unicode()
UpperCAmelCase : Tuple = {v: k for k, v in self.byte_encoder.items()}
with open(_SCREAMING_SNAKE_CASE , encoding="""utf-8""" ) as merges_handle:
UpperCAmelCase : Union[str, Any] = merges_handle.read().split("""\n""" )[1:-1]
UpperCAmelCase : Dict = [tuple(merge.split() ) for merge in bpe_merges]
UpperCAmelCase : Union[str, Any] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
UpperCAmelCase : Optional[Any] = {}
UpperCAmelCase : str = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
UpperCAmelCase : List[Any] = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""" )
@property
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
return len(self.encoder )
def SCREAMING_SNAKE_CASE ( self ) -> Any:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
UpperCAmelCase : List[str] = tuple(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Optional[int] = get_pairs(_SCREAMING_SNAKE_CASE )
if not pairs:
return token
while True:
UpperCAmelCase : Union[str, Any] = min(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : self.bpe_ranks.get(_SCREAMING_SNAKE_CASE , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
UpperCAmelCase , UpperCAmelCase : Optional[int] = bigram
UpperCAmelCase : Optional[Any] = []
UpperCAmelCase : Optional[Any] = 0
while i < len(_SCREAMING_SNAKE_CASE ):
try:
UpperCAmelCase : List[str] = word.index(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
UpperCAmelCase : Union[str, Any] = j
if word[i] == first and i < len(_SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
UpperCAmelCase : Union[str, Any] = tuple(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Dict = new_word
if len(_SCREAMING_SNAKE_CASE ) == 1:
break
else:
UpperCAmelCase : Union[str, Any] = get_pairs(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : List[Any] = """ """.join(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Tuple = word
return word
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
UpperCAmelCase : Tuple = []
for token in re.findall(self.pat , _SCREAMING_SNAKE_CASE ):
UpperCAmelCase : Tuple = """""".join(
self.byte_encoder[b] for b in token.encode("""utf-8""" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_SCREAMING_SNAKE_CASE ).split(""" """ ) )
return bpe_tokens
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> List[Any]:
'''simple docstring'''
return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> int:
'''simple docstring'''
return self.decoder.get(_SCREAMING_SNAKE_CASE )
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE ) -> Optional[int]:
'''simple docstring'''
UpperCAmelCase : str = """""".join(_SCREAMING_SNAKE_CASE )
UpperCAmelCase : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode("""utf-8""" , errors=self.errors )
return text
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> Tuple[str]:
'''simple docstring'''
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F"Vocabulary path ({save_directory}) should be a directory" )
return
UpperCAmelCase : int = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
UpperCAmelCase : Optional[int] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE ) + """\n""" )
UpperCAmelCase : Optional[Any] = 0
with open(_SCREAMING_SNAKE_CASE , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda _SCREAMING_SNAKE_CASE : kv[1] ):
if index != token_index:
logger.warning(
F"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
""" Please check that the tokenizer is not corrupted!""" )
UpperCAmelCase : str = token_index
writer.write(""" """.join(_SCREAMING_SNAKE_CASE ) + """\n""" )
index += 1
return vocab_file, merge_file
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
UpperCAmelCase : List[str] = [self.cls_token_id]
UpperCAmelCase : Dict = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ) -> List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1, 1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ) -> List[int]:
'''simple docstring'''
UpperCAmelCase : Dict = [self.sep_token_id]
UpperCAmelCase : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def SCREAMING_SNAKE_CASE ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE ) -> Optional[Any]:
'''simple docstring'''
UpperCAmelCase : int = kwargs.pop("""add_prefix_space""" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_SCREAMING_SNAKE_CASE ) > 0 and not text[0].isspace()):
UpperCAmelCase : Optional[Any] = """ """ + text
return (text, kwargs)
| 109 |
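For orientation: the tokenizer sample above implements BART's byte-level BPE, and its `build_inputs_with_special_tokens` produces the `<s> A </s>` and `<s> A </s></s> B </s>` layouts. A minimal sketch of that layout, assuming BART's default special-token ids (`<s>` = 0, `</s>` = 2):

def build_bart_inputs(ids_a, ids_b=None, cls_id=0, sep_id=2):
    # Single sequence: <s> A </s>
    if ids_b is None:
        return [cls_id] + ids_a + [sep_id]
    # Pair of sequences: <s> A </s></s> B </s>
    return [cls_id] + ids_a + [sep_id, sep_id] + ids_b + [sep_id]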
'''simple docstring'''
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoFeatureExtractor, WavaVecaFeatureExtractor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / "utils"))
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
__A : str = get_tests_dir("fixtures")
class __snake_case ( unittest.TestCase):
"""simple docstring"""
def __lowercase ( self : int ) -> List[str]:
# A mock response for an HTTP head request to emulate server down
lowerCAmelCase_ : str = mock.Mock()
lowerCAmelCase_ : Optional[Any] = 5_00
lowerCAmelCase_ : List[str] = {}
lowerCAmelCase_ : str = HTTPError
lowerCAmelCase_ : Any = {}
# Download this model to make sure it's in the cache.
lowerCAmelCase_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# Under the mock environment we get a 500 error when trying to reach the model.
with mock.patch("""requests.Session.request""" , return_value=lowerCamelCase ) as mock_head:
lowerCAmelCase_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained("""hf-internal-testing/tiny-random-wav2vec2""" )
# This checks that the fake head request was indeed called
mock_head.assert_called()
def __lowercase ( self : Dict ) -> Any:
# This test is for deprecated behavior and can be removed in v5
lowerCAmelCase_ : List[str] = WavaVecaFeatureExtractor.from_pretrained(
"""https://huggingface.co/hf-internal-testing/tiny-random-wav2vec2/resolve/main/preprocessor_config.json""" )
@is_staging_test
class __snake_case ( unittest.TestCase):
"""simple docstring"""
@classmethod
def __lowercase ( cls : Tuple ) -> str:
lowerCAmelCase_ : Dict = TOKEN
HfFolder.save_token(lowerCamelCase )
@classmethod
def __lowercase ( cls : Any ) -> Any:
try:
delete_repo(token=cls._token , repo_id="""test-feature-extractor""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""valid_org/test-feature-extractor-org""" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="""test-dynamic-feature-extractor""" )
except HTTPError:
pass
def __lowercase ( self : int ) -> str:
lowerCAmelCase_ : Tuple = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase )
feature_extractor.push_to_hub("""test-feature-extractor""" , use_auth_token=self._token )
lowerCAmelCase_ : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCamelCase , repo_id="""test-feature-extractor""" , push_to_hub=lowerCamelCase , use_auth_token=self._token )
lowerCAmelCase_ : Any = WavaVecaFeatureExtractor.from_pretrained(F'{USER}/test-feature-extractor' )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
def __lowercase ( self : Optional[Any] ) -> int:
lowerCAmelCase_ : Union[str, Any] = WavaVecaFeatureExtractor.from_pretrained(lowerCamelCase )
feature_extractor.push_to_hub("""valid_org/test-feature-extractor""" , use_auth_token=self._token )
lowerCAmelCase_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
# Reset repo
delete_repo(token=self._token , repo_id="""valid_org/test-feature-extractor""" )
# Push to hub via save_pretrained
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(
lowerCamelCase , repo_id="""valid_org/test-feature-extractor-org""" , push_to_hub=lowerCamelCase , use_auth_token=self._token )
lowerCAmelCase_ : Optional[int] = WavaVecaFeatureExtractor.from_pretrained("""valid_org/test-feature-extractor-org""" )
for k, v in feature_extractor.__dict__.items():
self.assertEqual(lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase ) )
def __lowercase ( self : Optional[Any] ) -> Any:
CustomFeatureExtractor.register_for_auto_class()
lowerCAmelCase_ : Dict = CustomFeatureExtractor.from_pretrained(lowerCamelCase )
feature_extractor.push_to_hub("""test-dynamic-feature-extractor""" , use_auth_token=self._token )
# This has added the proper auto_map field to the config
self.assertDictEqual(
feature_extractor.auto_map , {"""AutoFeatureExtractor""": """custom_feature_extraction.CustomFeatureExtractor"""} , )
lowerCAmelCase_ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
F'{USER}/test-dynamic-feature-extractor' , trust_remote_code=lowerCamelCase )
# Can't make an isinstance check because the new_feature_extractor is from the CustomFeatureExtractor class of a dynamic module
self.assertEqual(new_feature_extractor.__class__.__name__ , """CustomFeatureExtractor""" )
| 120 | 0 |
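The two staging test classes above exercise feature-extractor `push_to_hub` round-trips. A condensed sketch of the public flow (the user and repo names are hypothetical, and a write token must be configured):

from transformers import Wav2Vec2FeatureExtractor

extractor = Wav2Vec2FeatureExtractor.from_pretrained(
    "hf-internal-testing/tiny-random-wav2vec2"
)
extractor.push_to_hub("my-test-feature-extractor")  # hypothetical repo name
reloaded = Wav2Vec2FeatureExtractor.from_pretrained(
    "your-username/my-test-feature-extractor"       # hypothetical namespace
)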
"""simple docstring"""
def lowercase ( _snake_case : float , _snake_case : float , _snake_case : float , _snake_case : float , _snake_case : float , ) ->float:
"""simple docstring"""
__snake_case : Optional[int] = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError('''All input parameters must be non-negative''' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('''Relative densities cannot be greater than one''' )
else:
__snake_case : Optional[Any] = 1 - (matter_density + radiation_density + dark_energy)
__snake_case : List[str] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
__snake_case : List[Any] = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
SCREAMING_SNAKE_CASE : Any = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 24 |
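The function in the sample above evaluates the FLRW Hubble parameter; in standard notation (a sketch of the math, matching the code's variables):

H(z) = H_0 \sqrt{\Omega_r (1+z)^4 + \Omega_m (1+z)^3 + \Omega_k (1+z)^2 + \Omega_\Lambda},
\qquad \Omega_k = 1 - \Omega_r - \Omega_m - \Omega_\Lambda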
"""simple docstring"""
import json
import os
import tempfile
from unittest.mock import patch
import torch
from torch.utils.data import DataLoader, TensorDataset
from accelerate import DistributedType, infer_auto_device_map, init_empty_weights
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState, PartialState
from accelerate.test_utils import require_bnb, require_multi_gpu, slow
from accelerate.test_utils.testing import AccelerateTestCase, require_cuda
from accelerate.utils import patch_environment
def lowercase ( ) ->Optional[int]:
"""simple docstring"""
__snake_case : int = torch.nn.Linear(2 , 4 )
__snake_case : Optional[Any] = torch.optim.AdamW(model.parameters() , lr=1.0 )
__snake_case : Optional[Any] = torch.optim.lr_scheduler.OneCycleLR(_snake_case , max_lr=0.01 , steps_per_epoch=2 , epochs=1 )
__snake_case : List[str] = DataLoader(TensorDataset(torch.tensor([1, 2, 3] ) ) )
__snake_case : Dict = DataLoader(TensorDataset(torch.tensor([4, 5, 6] ) ) )
return model, optimizer, scheduler, train_dl, valid_dl
def lowercase ( _snake_case : str ) ->Optional[Any]:
"""simple docstring"""
return (model.weight.abs().sum() + model.bias.abs().sum()).item()
def lowercase ( _snake_case : Union[str, Any] ) ->Tuple:
"""simple docstring"""
__snake_case : Dict = torch.nn.Linear(*tuple(model.weight.T.shape ) ).state_dict()
model.load_state_dict(_snake_case )
class _UpperCAmelCase ( __snake_case ):
'''simple docstring'''
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
assert PartialState._shared_state["_cpu"] is False
assert PartialState._shared_state["device"].type == "cuda"
with self.assertRaises(a_ ):
__snake_case : Any = Accelerator(cpu=a_ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case : Optional[int] = GradientState()
assert state.num_steps == 1
__snake_case : str = 4
assert state.num_steps == 4
assert state.sync_gradients is True
__snake_case : List[Any] = False
assert state.sync_gradients is False
GradientState._reset_state()
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
(
(
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) , (
__snake_case
) ,
) : Union[str, Any] = accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
self.assertTrue(prepared_model in accelerator._models )
self.assertTrue(prepared_optimizer in accelerator._optimizers )
self.assertTrue(prepared_scheduler in accelerator._schedulers )
self.assertTrue(prepared_train_dl in accelerator._dataloaders )
self.assertTrue(prepared_valid_dl in accelerator._dataloaders )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Tuple = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Union[str, Any] = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
accelerator.free_memory()
self.assertTrue(len(accelerator._models ) == 0 )
self.assertTrue(len(accelerator._optimizers ) == 0 )
self.assertTrue(len(accelerator._schedulers ) == 0 )
self.assertTrue(len(accelerator._dataloaders ) == 0 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
PartialState._reset_state()
# Mock torch.cuda.set_device to avoid an exception as the device doesn't exist
def noop(*a_ , **a_ ):
pass
with patch('''torch.cuda.set_device''' , a_ ), patch_environment(ACCELERATE_TORCH_DEVICE='''cuda:64''' ):
__snake_case : List[Any] = Accelerator()
self.assertEqual(str(accelerator.state.device ) , '''cuda:64''' )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : Optional[int] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : Any = get_signature(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# make sure loaded weights match
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : int = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = create_components()
accelerator.prepare(a_ , a_ , a_ , a_ , a_ )
__snake_case : List[Any] = get_signature(a_ )
# saving hook
def save_config(a_ , a_ , a_ ):
__snake_case : Optional[Any] = {'''class_name''': models[0].__class__.__name__}
with open(os.path.join(a_ , '''data.json''' ) , '''w''' ) as f:
json.dump(a_ , a_ )
# loading hook
def load_config(a_ , a_ ):
with open(os.path.join(a_ , '''data.json''' ) , '''r''' ) as f:
__snake_case : Any = json.load(a_ )
__snake_case : List[str] = config['''class_name''']
__snake_case : str = accelerator.register_save_state_pre_hook(a_ )
__snake_case : Union[str, Any] = accelerator.register_load_state_pre_hook(a_ )
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Any = '''random'''
# make sure loaded weights match with hooks
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
# model.class_name is loaded from config
self.assertTrue(model.class_name == model.__class__.__name__ )
# remove hooks
save_hook.remove()
load_hook.remove()
with tempfile.TemporaryDirectory() as tmpdirname:
accelerator.save_state(a_ )
# make sure random weights don't match with hooks removed
load_random_weights(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) > 1E-3 )
# random class name to verify correct one is loaded
__snake_case : Union[str, Any] = '''random'''
# make sure loaded weights match with hooks removed
accelerator.load_state(a_ )
self.assertTrue(abs(model_signature - get_signature(a_ ) ) < 1E-3 )
# model.class_name is NOT loaded from config
self.assertTrue(model.class_name != model.__class__.__name__ )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = create_components()
__snake_case : Union[str, Any] = None
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Tuple = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertTrue(dummy_obj is None )
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : str = Accelerator()
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case : Optional[Any] = create_components()
__snake_case : Optional[int] = [1, 2, 3]
# This should work
__snake_case , __snake_case , __snake_case , __snake_case , __snake_case , __snake_case : str = accelerator.prepare(
a_ , a_ , a_ , a_ , a_ , a_ )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Dummy object should have `_is_accelerate_prepared` set to `True`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Model is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Optimizer is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Scheduler is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Train Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
self.assertEqual(
getattr(a_ , '''_is_accelerate_prepared''' , a_ ) , a_ , '''Valid Dataloader is missing `_is_accelerator_prepared` or is set to `False`''' , )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map={'''''': 0} , )
__snake_case : Optional[Any] = Accelerator()
# This should work
__snake_case : Any = accelerator.prepare(a_ )
@slow
@require_bnb
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : Any = Accelerator()
with init_empty_weights():
__snake_case : List[str] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : Union[str, Any] = infer_auto_device_map(a_ )
__snake_case : str = '''cpu'''
__snake_case : Optional[int] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , device_map=a_ , load_in_abit=a_ , llm_inta_enable_fpaa_cpu_offload=a_ )
# This should not work and should raise a ValueError
with self.assertRaises(a_ ):
__snake_case : Dict = accelerator.prepare(a_ )
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
__snake_case : str = {'''distributed_type''': DistributedType.MULTI_GPU}
with init_empty_weights():
__snake_case : Any = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
model.tie_weights()
__snake_case : List[Any] = infer_auto_device_map(a_ )
__snake_case : Dict = 1
__snake_case : str = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , )
__snake_case : Any = Accelerator()
# This should not work and should raise a ValueError
with self.assertRaises(a_ ):
__snake_case : Tuple = accelerator.prepare(a_ )
PartialState._reset_state()
@slow
@require_bnb
@require_multi_gpu
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
from transformers import AutoModelForCausalLM
with init_empty_weights():
__snake_case : Dict = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , )
__snake_case : Tuple = infer_auto_device_map(a_ )
__snake_case : Tuple = 1
__snake_case : List[Any] = AutoModelForCausalLM.from_pretrained(
'''EleutherAI/gpt-neo-125m''' , load_in_abit=a_ , device_map=a_ , )
__snake_case : Tuple = Accelerator()
# This should work
__snake_case : Dict = accelerator.prepare(a_ )
@require_cuda
def SCREAMING_SNAKE_CASE (self ):
'''simple docstring'''
__snake_case : List[Any] = torch.nn.Linear(10 , 10 )
__snake_case : List[str] = torch.optim.SGD(model.parameters() , lr=0.01 )
__snake_case : Optional[Any] = Accelerator(cpu=a_ )
__snake_case : str = accelerator.prepare(a_ )
| 24 | 1 |
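A condensed sketch of the checkpoint round-trip the Accelerator tests above cover; CPU-only so it runs without a GPU, and the directory is temporary:

import tempfile

import torch
from accelerate import Accelerator

accelerator = Accelerator(cpu=True)
model = accelerator.prepare(torch.nn.Linear(2, 4))
with tempfile.TemporaryDirectory() as ckpt_dir:
    accelerator.save_state(ckpt_dir)  # saves model, optimizer and RNG state
    accelerator.load_state(ckpt_dir)  # restores it in place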
UpperCAmelCase_ = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
UpperCAmelCase_ = [{'type': 'code', 'content': INSTALL_CONTENT}]
UpperCAmelCase_ = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 201 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class snake_case__ ( UpperCamelCase , UpperCamelCase , unittest.TestCase):
a_ = StableDiffusionDiffEditPipeline
a_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
a_ = frozenset(
[]) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
a_ = frozenset([])
def A ( self : Tuple ) -> Optional[Any]:
torch.manual_seed(0 )
UpperCAmelCase_ : str = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=_A , )
UpperCAmelCase_ : Optional[Any] = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_one=_A , )
UpperCAmelCase_ : Optional[int] = DDIMInverseScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule='''scaled_linear''' , clip_sample=_A , set_alpha_to_zero=_A , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , sample_size=1_28 , )
torch.manual_seed(0 )
UpperCAmelCase_ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , hidden_act='''gelu''' , projection_dim=5_12 , )
UpperCAmelCase_ : Union[str, Any] = CLIPTextModel(_A )
UpperCAmelCase_ : List[Any] = CLIPTokenizer.from_pretrained('''hf-internal-testing/tiny-random-clip''' )
UpperCAmelCase_ : Optional[int] = {
'''unet''': unet,
'''scheduler''': scheduler,
'''inverse_scheduler''': inverse_scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def A ( self : str , _A : List[str] , _A : Any=0 ) -> str:
UpperCAmelCase_ : Optional[Any] = floats_tensor((1, 16, 16) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Dict = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(_A ) ).to(_A )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Any = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : str = {
'''prompt''': '''a dog and a newt''',
'''mask_image''': mask,
'''image_latents''': latents,
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : Tuple , _A : Optional[Any] , _A : Optional[Any]=0 ) -> List[str]:
UpperCAmelCase_ : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : Dict = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : int = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Dict = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Any = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Optional[Any] = {
'''image''': image,
'''source_prompt''': '''a cat and a frog''',
'''target_prompt''': '''a dog and a newt''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''num_maps_per_mask''': 2,
'''mask_encode_strength''': 1.0,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : int , _A : Tuple , _A : List[str]=0 ) -> Any:
UpperCAmelCase_ : str = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
UpperCAmelCase_ : List[str] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
UpperCAmelCase_ : Optional[int] = Image.fromarray(np.uinta(_A ) ).convert('''RGB''' )
if str(_A ).startswith('''mps''' ):
UpperCAmelCase_ : Optional[int] = torch.manual_seed(_A )
else:
UpperCAmelCase_ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
UpperCAmelCase_ : Optional[int] = {
'''image''': image,
'''prompt''': '''a cat and a frog''',
'''generator''': generator,
'''num_inference_steps''': 2,
'''inpaint_strength''': 1.0,
'''guidance_scale''': 6.0,
'''decode_latents''': True,
'''output_type''': '''numpy''',
}
return inputs
def A ( self : List[str] ) -> Optional[Any]:
if not hasattr(self.pipeline_class , '''_optional_components''' ):
return
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : Any = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(_A , _A , _A )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
UpperCAmelCase_ : List[str] = self.get_dummy_inputs(_A )
UpperCAmelCase_ : str = pipe(**_A )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(_A )
UpperCAmelCase_ : Any = self.pipeline_class.from_pretrained(_A )
pipe_loaded.to(_A )
pipe_loaded.set_progress_bar_config(disable=_A )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(_A , _A ) is None , F"`{optional_component}` did not stay set to None after loading." , )
UpperCAmelCase_ : Tuple = self.get_dummy_inputs(_A )
UpperCAmelCase_ : List[Any] = pipe_loaded(**_A )[0]
UpperCAmelCase_ : Any = np.abs(output - output_loaded ).max()
self.assertLess(_A , 1e-4 )
def A ( self : Tuple ) -> int:
UpperCAmelCase_ : Optional[Any] = '''cpu'''
UpperCAmelCase_ : Any = self.get_dummy_components()
UpperCAmelCase_ : Optional[int] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_mask_inputs(_A )
UpperCAmelCase_ : int = pipe.generate_mask(**_A )
UpperCAmelCase_ : Tuple = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
UpperCAmelCase_ : List[Any] = np.array([0] * 9 )
UpperCAmelCase_ : Dict = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def A ( self : str ) -> Optional[int]:
UpperCAmelCase_ : Union[str, Any] = '''cpu'''
UpperCAmelCase_ : str = self.get_dummy_components()
UpperCAmelCase_ : str = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = self.get_dummy_inversion_inputs(_A )
UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
UpperCAmelCase_ : List[Any] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase_ : int = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ : List[str] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
def A ( self : Tuple ) -> Optional[Any]:
super().test_inference_batch_single_identical(expected_max_diff=5e-3 )
def A ( self : str ) -> Tuple:
UpperCAmelCase_ : Any = '''cpu'''
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_components()
UpperCAmelCase_ : Any = {'''beta_start''': 0.00_085, '''beta_end''': 0.012, '''beta_schedule''': '''scaled_linear'''}
UpperCAmelCase_ : Any = DPMSolverMultistepScheduler(**_A )
UpperCAmelCase_ : Optional[Any] = DPMSolverMultistepInverseScheduler(**_A )
UpperCAmelCase_ : Union[str, Any] = self.pipeline_class(**_A )
pipe.to(_A )
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Union[str, Any] = self.get_dummy_inversion_inputs(_A )
UpperCAmelCase_ : Optional[Any] = pipe.invert(**_A ).images
UpperCAmelCase_ : Tuple = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
UpperCAmelCase_ : List[Any] = np.array(
[0.5_150, 0.5_134, 0.5_043, 0.5_376, 0.4_694, 0.51_050, 0.5_015, 0.4_407, 0.4_799] , )
UpperCAmelCase_ : Optional[int] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_A , 1e-3 )
@require_torch_gpu
@slow
class snake_case__ ( unittest.TestCase):
def A ( self : Optional[Any] ) -> Optional[int]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def A ( cls : Dict ) -> List[Any]:
UpperCAmelCase_ : Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png''' )
UpperCAmelCase_ : int = raw_image.convert('''RGB''' ).resize((7_68, 7_68) )
UpperCAmelCase_ : Any = raw_image
def A ( self : List[Any] ) -> List[str]:
UpperCAmelCase_ : int = torch.manual_seed(0 )
UpperCAmelCase_ : str = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa )
UpperCAmelCase_ : List[str] = DDIMScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ : List[str] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
UpperCAmelCase_ : Tuple = '''a bowl of pears'''
UpperCAmelCase_ : Optional[int] = pipe.generate_mask(
image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
UpperCAmelCase_ : List[str] = pipe.invert(
prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A ).latents
UpperCAmelCase_ : Any = pipe(
prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ : str = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
def A ( self : Tuple ) -> List[str]:
UpperCAmelCase_ : Dict = torch.manual_seed(0 )
UpperCAmelCase_ : Any = StableDiffusionDiffEditPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2-1''' , safety_checker=_A , torch_dtype=torch.floataa )
UpperCAmelCase_ : List[Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ : Union[str, Any] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=_A )
UpperCAmelCase_ : Optional[Any] = '''a bowl of fruit'''
UpperCAmelCase_ : Dict = '''a bowl of pears'''
UpperCAmelCase_ : Union[str, Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=_A , target_prompt=_A , generator=_A , )
UpperCAmelCase_ : List[Any] = pipe.invert(
prompt=_A , image=self.raw_image , inpaint_strength=0.7 , generator=_A , num_inference_steps=25 , ).latents
UpperCAmelCase_ : Dict = pipe(
prompt=_A , mask_image=_A , image_latents=_A , generator=_A , negative_prompt=_A , inpaint_strength=0.7 , num_inference_steps=25 , output_type='''numpy''' , ).images[0]
UpperCAmelCase_ : Tuple = (
np.array(
load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/diffedit/pears.png''' ).resize((7_68, 7_68) ) )
/ 2_55
)
assert np.abs((expected_image - image).max() ) < 5e-1
| 304 | 0 |
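The DiffEdit tests above follow the pipeline's three public steps: mask generation, latent inversion, and masked generation. A condensed sketch, assuming `raw_image` is a 768x768 PIL image:

import torch
from diffusers import DDIMInverseScheduler, DDIMScheduler, StableDiffusionDiffEditPipeline

pipe = StableDiffusionDiffEditPipeline.from_pretrained(
    "stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16
)
pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
pipe.inverse_scheduler = DDIMInverseScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()

mask = pipe.generate_mask(
    image=raw_image, source_prompt="a bowl of fruit", target_prompt="a bowl of pears"
)
latents = pipe.invert(prompt="a bowl of fruit", image=raw_image).latents
edited = pipe(prompt="a bowl of pears", mask_image=mask, image_latents=latents).images[0]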
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__UpperCAmelCase : str = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
__UpperCAmelCase : Any = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
__UpperCAmelCase : List[Any] = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase__ ( self : List[Any] ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def UpperCAmelCase__ ( self : List[str] , A : int , A : str , A : Any=4 , A : Tuple=False ):
__snake_case: Tuple = compute_bleu(
reference_corpus=A , translation_corpus=A , max_order=A , smooth=A )
(__snake_case): Optional[Any] = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 354 |
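The metric above wraps TensorFlow NMT's `compute_bleu`, i.e. corpus BLEU with a brevity penalty. In the usual notation, with n-gram precisions p_n, weights w_n, candidate length c and reference length r:

\mathrm{BLEU} = \mathrm{BP} \cdot \exp\Big(\sum_{n=1}^{N} w_n \log p_n\Big),
\qquad
\mathrm{BP} = \begin{cases} 1 & \text{if } c > r \\ e^{\,1 - r/c} & \text{if } c \le r \end{cases}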
from __future__ import annotations
import numpy as np
def A__ ( SCREAMING_SNAKE_CASE__) -> List[str]:
return np.maximum(0 , SCREAMING_SNAKE_CASE__)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 293 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
is_abit_bnb_available,
is_abit_bnb_available,
is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
snake_case : List[str] = logging.getLogger(__name__)
def __lowerCamelCase ( UpperCAmelCase_ : torch.nn.Module , UpperCAmelCase_ : BnbQuantizationConfig , UpperCAmelCase_ : Union[str, os.PathLike] = None , UpperCAmelCase_ : Optional[Dict[str, Union[int, str, torch.device]]] = None , UpperCAmelCase_ : Optional[List[str]] = None , UpperCAmelCase_ : Optional[Dict[Union[int, str], Union[int, str]]] = None , UpperCAmelCase_ : Optional[Union[str, os.PathLike]] = None , UpperCAmelCase_ : bool = False , ):
"""simple docstring"""
a :List[Any] = bnb_quantization_config.load_in_abit
a :List[Any] = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
a :Tuple = []
# custom device map
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) and len(device_map.keys() ) > 1:
a :str = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
a :Optional[Any] = get_keys_to_not_convert(UpperCAmelCase_ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(UpperCAmelCase_ )
a :Tuple = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
a :List[str] = []
a :str = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(UpperCAmelCase_ )
# compatibility with peft
a :str = load_in_abit
a :Optional[Any] = load_in_abit
a :List[str] = get_parameter_device(UpperCAmelCase_ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
a :Any = replace_with_bnb_layers(UpperCAmelCase_ , UpperCAmelCase_ , modules_to_not_convert=UpperCAmelCase_ )
# convert param to the right dtype
a :Any = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
a :List[Any] = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
a :Tuple = getattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(UpperCAmelCase_ ):
param.to(UpperCAmelCase_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
F'''The model device type is {model_device.type}. However, cuda is needed for quantization.'''
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
F'''`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} ''' )
else:
with init_empty_weights():
a :Any = replace_with_bnb_layers(
UpperCAmelCase_ , UpperCAmelCase_ , modules_to_not_convert=UpperCAmelCase_ )
a :Dict = get_quantized_model_device_map(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , max_memory=UpperCAmelCase_ , no_split_module_classes=UpperCAmelCase_ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
a :Optional[int] = True
a :int = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=UpperCAmelCase_ , offload_state_dict=UpperCAmelCase_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(UpperCAmelCase_ , device_map=UpperCAmelCase_ , offload_dir=UpperCAmelCase_ )
def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : str=None , UpperCAmelCase_ : str=None ):
"""simple docstring"""
if device_map is None:
if torch.cuda.is_available():
a :Any = {'''''': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
a :Dict = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
a :List[Any] = {}
a :Union[str, Any] = special_dtypes
a :List[Any] = no_split_module_classes
a :Optional[int] = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
a :List[Any] = get_balanced_memory(
UpperCAmelCase_ , low_zero=(device_map == '''balanced_low_0''') , max_memory=UpperCAmelCase_ , **UpperCAmelCase_ , )
a :List[str] = max_memory
a :Dict = infer_auto_device_map(UpperCAmelCase_ , **UpperCAmelCase_ )
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
# check that we don't have any quantized module on the cpu
a :str = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
a :Union[str, Any] = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''
Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
these modules in `torch_dtype`, you need to pass a custom `device_map` to
`load_and_quantize_model`. Check
https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
for more details.
''' )
else:
logger.info(
'''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : str=None ):
"""simple docstring"""
if modules_to_not_convert is None:
a :Tuple = []
a , a :List[str] = _replace_with_bnb_layers(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def __lowerCamelCase ( UpperCAmelCase_ : Optional[Any] , UpperCAmelCase_ : Any , UpperCAmelCase_ : str=None , UpperCAmelCase_ : Any=None , ):
"""simple docstring"""
a :Optional[int] = False
for name, module in model.named_children():
if current_key_name is None:
a :Tuple = []
current_key_name.append(UpperCAmelCase_ )
if isinstance(UpperCAmelCase_ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
a :Dict = '''.'''.join(UpperCAmelCase_ )
a :Optional[int] = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
a :int = False
break
if proceed:
# Load the bnb module with empty weights and replace the `nn.Linear` module
if bnb_quantization_config.load_in_abit:
a :Any = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=UpperCAmelCase_ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
a :Dict = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
a :List[str] = module.weight.data
if module.bias is not None:
a :Tuple = module.bias.data
bnb_module.requires_grad_(UpperCAmelCase_ )
setattr(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
a :Dict = True
if len(list(module.children() ) ) > 0:
a , a :List[str] = _replace_with_bnb_layers(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
a :Dict = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def __lowerCamelCase ( UpperCAmelCase_ : str ):
"""simple docstring"""
with init_empty_weights():
a :Any = deepcopy(UpperCAmelCase_ ) # this has 0 cost since it is done inside the `init_empty_weights` context manager
a :str = find_tied_parameters(UpperCAmelCase_ )
# For compatibility with Accelerate < 0.18
if isinstance(UpperCAmelCase_ , UpperCAmelCase_ ):
a :int = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
a :Dict = sum(UpperCAmelCase_ , [] )
a :Optional[Any] = len(UpperCAmelCase_ ) > 0
# Check if it is a base model
a :Optional[Any] = False
if hasattr(UpperCAmelCase_ , '''base_model_prefix''' ):
a :Optional[Any] = not hasattr(UpperCAmelCase_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
a :int = list(model.named_children() )
a :Any = [list_modules[-1][0]]
# add last module together with tied weights
a :Tuple = set(UpperCAmelCase_ ) - set(UpperCAmelCase_ )
a :List[Any] = list(set(UpperCAmelCase_ ) ) + list(UpperCAmelCase_ )
# remove ".weight" from the keys
a :List[str] = ['''.weight''', '''.bias''']
a :Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
a :Any = name.replace(UpperCAmelCase_ , '''''' )
filtered_module_names.append(UpperCAmelCase_ )
return filtered_module_names
def __lowerCamelCase ( UpperCAmelCase_ : Dict ):
"""simple docstring"""
for m in model.modules():
if isinstance(UpperCAmelCase_ , bnb.nn.Linearabit ):
return True
return False
def __lowerCamelCase ( UpperCAmelCase_ : nn.Module ):
"""simple docstring"""
return next(parameter.parameters() ).device
def __lowerCamelCase ( UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : str , UpperCAmelCase_ : Any , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Dict , UpperCAmelCase_ : List[Any] ):
"""simple docstring"""
if fpaa_statistics is None:
set_module_tensor_to_device(UpperCAmelCase_ , UpperCAmelCase_ , 0 , dtype=UpperCAmelCase_ , value=UpperCAmelCase_ )
a :Tuple = param_name
a :Optional[Any] = model
if "." in tensor_name:
a :str = tensor_name.split('''.''' )
for split in splits[:-1]:
a :Dict = getattr(UpperCAmelCase_ , UpperCAmelCase_ )
if new_module is None:
raise ValueError(F'''{module} has no attribute {split}.''' )
a :List[Any] = new_module
a :List[Any] = splits[-1]
# offload weights
a :Optional[Any] = False
offload_weight(module._parameters[tensor_name] , UpperCAmelCase_ , UpperCAmelCase_ , index=UpperCAmelCase_ )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , UpperCAmelCase_ , index=UpperCAmelCase_ , )
else:
offload_weight(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , index=UpperCAmelCase_ )
offload_weight(UpperCAmelCase_ , param_name.replace('''weight''' , '''SCB''' ) , UpperCAmelCase_ , index=UpperCAmelCase_ )
set_module_tensor_to_device(UpperCAmelCase_ , UpperCAmelCase_ , '''meta''' , dtype=UpperCAmelCase_ , value=torch.empty(*param.size() ) )
| 94 |
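The public entry point for the helpers in the sample above is `load_and_quantize_model`, along the lines of Accelerate's quantization guide. A sketch (the model class and weights path are hypothetical):

from accelerate import init_empty_weights
from accelerate.utils import BnbQuantizationConfig, load_and_quantize_model

with init_empty_weights():
    empty_model = MyModel()  # hypothetical nn.Module skeleton, no weights loaded

bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
quantized = load_and_quantize_model(
    empty_model,
    weights_location="path/to/checkpoint",  # hypothetical folder with the weights
    bnb_quantization_config=bnb_config,
    device_map="auto",
)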
from __future__ import annotations
def decrypt_caesar_with_chi_squared(
    ciphertext: str,
    cipher_alphabet: list[str] | None = None,
    frequencies_dict: dict[str, float] | None = None,
    case_sensitive: bool = False,
) -> tuple[int, float, str]:
    """simple docstring"""
    alphabet_letters = cipher_alphabet or [chr(i) for i in range(97, 123)]

    # If the argument is None or the user provided an empty dictionary
    if not frequencies_dict:
        # Frequencies of letters in the english language (how much they show up)
        frequencies = {
            "a": 0.08497,
            "b": 0.01492,
            "c": 0.02202,
            "d": 0.04253,
            "e": 0.11162,
            "f": 0.02228,
            "g": 0.02015,
            "h": 0.06094,
            "i": 0.07546,
            "j": 0.00153,
            "k": 0.01292,
            "l": 0.04025,
            "m": 0.02406,
            "n": 0.06749,
            "o": 0.07507,
            "p": 0.01929,
            "q": 0.00095,
            "r": 0.07587,
            "s": 0.06327,
            "t": 0.09356,
            "u": 0.02758,
            "v": 0.00978,
            "w": 0.02560,
            "x": 0.00150,
            "y": 0.01994,
            "z": 0.00077,
        }
    else:
        # Custom frequencies dictionary
        frequencies = frequencies_dict

    if not case_sensitive:
        ciphertext = ciphertext.lower()

    # Chi squared statistic values
    chi_squared_statistic_values: dict[int, tuple[float, str]] = {}

    # cycle through all of the shifts
    for shift in range(len(alphabet_letters)):
        decrypted_with_shift = ""

        # decrypt the message with the shift
        for letter in ciphertext:
            try:
                # Try to index the letter in the alphabet
                new_key = (alphabet_letters.index(letter.lower()) - shift) % len(
                    alphabet_letters
                )
                decrypted_with_shift += (
                    alphabet_letters[new_key].upper()
                    if case_sensitive and letter.isupper()
                    else alphabet_letters[new_key]
                )
            except ValueError:
                # Append the character if it isn't in the alphabet
                decrypted_with_shift += letter

        chi_squared_statistic = 0.0

        # Loop through each letter in the decoded message with the shift
        for letter in decrypted_with_shift:
            if case_sensitive:
                letter = letter.lower()
                if letter in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.lower().count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value
            else:
                if letter.lower() in frequencies:
                    # Get the amount of times the letter occurs in the message
                    occurrences = decrypted_with_shift.count(letter)
                    # Get the expected amount of times the letter should appear based
                    # on letter frequencies
                    expected = frequencies[letter] * occurrences
                    # Complete the chi squared statistic formula
                    chi_letter_value = ((occurrences - expected) ** 2) / expected
                    # Add the margin of error to the total chi squared statistic
                    chi_squared_statistic += chi_letter_value

        # Add the data to the chi_squared_statistic_values dictionary
        chi_squared_statistic_values[shift] = (
            chi_squared_statistic,
            decrypted_with_shift,
        )

    # Get the most likely cipher by finding the cipher with the smallest chi squared
    # statistic
    def chi_squared_statistic_values_sorting_key(key: int) -> tuple[float, str]:
        return chi_squared_statistic_values[key]

    most_likely_cipher = min(
        chi_squared_statistic_values,
        key=chi_squared_statistic_values_sorting_key,
    )

    # Get all the data from the most likely cipher (key, decoded message)
    (
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    ) = chi_squared_statistic_values[most_likely_cipher]

    # Return the data on the most likely shift
    return (
        most_likely_cipher,
        most_likely_cipher_chi_squared_value,
        decoded_most_likely_cipher,
    )
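if __name__ == "__main__":
    # Illustrative run, adapted from the algorithm's original doctest (the
    # expected result shown is quoted from that doctest, not re-verified here):
    # decrypt_caesar_with_chi_squared(
    #     "dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!"
    # ) -> (7, 3129.228..., "why is the caesar cipher so popular? it is too easy to crack!")
    print(decrypt_caesar_with_chi_squared("dof pz aol jhlzhy jpwoly zv wvwbshy? pa pz avv lhzf av jyhjr!"))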
| 94 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase : List[Any] = logging.get_logger(__name__)
_lowercase : Tuple = {
'studio-ousia/luke-base': 'https://huggingface.co/studio-ousia/luke-base/resolve/main/config.json',
'studio-ousia/luke-large': 'https://huggingface.co/studio-ousia/luke-large/resolve/main/config.json',
}
class UpperCamelCase__( PretrainedConfig ):
    model_type = "luke"

    def __init__(
        self,
        vocab_size=50267,
        entity_vocab_size=500000,
        hidden_size=768,
        entity_emb_size=256,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_entity_aware_attention=True,
        classifier_dropout=None,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.entity_vocab_size = entity_vocab_size
        self.hidden_size = hidden_size
        self.entity_emb_size = entity_emb_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_entity_aware_attention = use_entity_aware_attention
        self.classifier_dropout = classifier_dropout
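# Minimal usage sketch for the configuration class above (kept under its
# obfuscated name here; upstream this is `LukeConfig`):
if __name__ == "__main__":
    config = UpperCamelCase__(entity_emb_size=128)
    print(config.hidden_size, config.entity_emb_size)  # 768 128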
| 367 |
'''simple docstring'''
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
_lowercase : List[str] = """0.12""" # assumed parallelism: 8
if is_torch_available():
import torch
def ids_tensor(shape, vocab_size, rng=None):
    '''Creates a random int32 tensor of the given shape with values in [0, vocab_size).'''
    if rng is None:
        rng = random.Random()

    total_dims = 1
    for dim in shape:
        total_dims *= dim

    values = []
    for _ in range(total_dims):
        values.append(rng.randint(0, vocab_size - 1))

    output = np.array(values, dtype=jnp.int32).reshape(shape)

    return output
def random_attention_mask(shape, rng=None):
    '''simple docstring'''
    attn_mask = ids_tensor(shape, vocab_size=2, rng=rng)
    # make sure that at least one token is attended to for each batch
    attn_mask[:, -1] = 1
    return attn_mask
@require_flax
class UpperCamelCase__:
__magic_name__ : Optional[int] = None
__magic_name__ : Optional[Any] = ()
    def _get_input_ids_and_config(self):
        """simple docstring"""
        config, inputs = self.model_tester.prepare_config_and_inputs_for_common()
        # cut to half length & take max batch_size 3
        max_batch_size = 2
        sequence_length = inputs["input_ids"].shape[-1] // 2
        input_ids = inputs["input_ids"][:max_batch_size, :sequence_length]
        attention_mask = jnp.ones_like(input_ids)
        attention_mask = attention_mask[:max_batch_size, :sequence_length]
        # generate max 5 tokens
        max_length = input_ids.shape[-1] + 5
        if config.eos_token_id is not None and config.pad_token_id is None:
            # hack to allow generate for models such as GPT2 as is done in `generate()`
            config.pad_token_id = config.eos_token_id
        return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def a__( self : Dict )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
UpperCAmelCase = 0
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase = getattr(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase = pt_model_class(lowerCAmelCase ).eval()
UpperCAmelCase = load_flax_weights_in_pytorch_model(lowerCAmelCase , flax_model.params )
UpperCAmelCase = flax_model.generate(lowerCAmelCase ).sequences
UpperCAmelCase = pt_model.generate(torch.tensor(lowerCAmelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
UpperCAmelCase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def a__( self : Any )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = True
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : str )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : List[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = False
UpperCAmelCase = max_length
UpperCAmelCase = 2
UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def a__( self : Tuple )-> List[str]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = True
UpperCAmelCase = max_length
UpperCAmelCase = 0.8
UpperCAmelCase = 10
UpperCAmelCase = 0.3
UpperCAmelCase = 1
UpperCAmelCase = 8
UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Optional[Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = max_length
UpperCAmelCase = 1
UpperCAmelCase = 8
UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Tuple )-> Tuple:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
UpperCAmelCase = max_length
UpperCAmelCase = 2
UpperCAmelCase = 1
UpperCAmelCase = 8
UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Union[str, Any] )-> Any:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase = False
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Optional[Any] )-> int:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase = True
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def a__( self : Tuple )-> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
UpperCAmelCase = 2
UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
UpperCAmelCase = model_class(lowerCAmelCase )
UpperCAmelCase = model.generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , lowerCAmelCase )
UpperCAmelCase = jit(model.generate )
UpperCAmelCase = jit_generate(lowerCAmelCase , attention_mask=lowerCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class UpperCamelCase__( unittest.TestCase ):
def a__( self : Union[str, Any] )-> Optional[int]:
"""simple docstring"""
UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-bert''' )
UpperCAmelCase = FlaxAutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-bert-flax-only''' )
UpperCAmelCase = '''Hello world'''
UpperCAmelCase = tokenizer(lowerCAmelCase , return_tensors='''np''' ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(lowerCAmelCase , '''do_samples''' ):
model.generate(lowerCAmelCase , do_samples=lowerCAmelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(lowerCAmelCase , '''foo''' ):
UpperCAmelCase = {'''foo''': '''bar'''}
model.generate(lowerCAmelCase , **lowerCAmelCase )
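if __name__ == "__main__":
    # Illustrative standalone version of the jit-equivalence pattern the tests
    # above repeat. The checkpoint name and max_length are assumptions for this
    # sketch, not values taken from the test suite, and a model download is
    # required.
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = FlaxAutoModelForCausalLM.from_pretrained("gpt2")
    model.config.max_length = 10
    sample_ids = tokenizer("Hello", return_tensors="np").input_ids
    eager_sequences = model.generate(sample_ids).sequences
    jit_sequences = jit(model.generate)(sample_ids).sequences
    assert eager_sequences.tolist() == jit_sequences.tolist()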
| 91 | 0 |
def equation(x: float) -> float:
    return 10 - x * x


def bisection(a: float, b: float) -> float:
    # Bolzano theorem: a sign change on [a, b] guarantees a root in between
    if equation(a) * equation(b) >= 0:
        raise ValueError("Wrong space!")

    c = a
    while (b - a) >= 0.01:
        # Find middle point
        c = (a + b) / 2
        # Check if middle point is root
        if equation(c) == 0.0:
            break
        # Decide the side to repeat the steps
        if equation(c) * equation(a) < 0:
            b = c
        else:
            a = c
    return c
if __name__ == "__main__":
import doctest
doctest.testmod()
print(bisection(-2, 5))
print(bisection(0, 6))
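# Worked check: equation(-2) = 6 > 0 and equation(5) = -15 < 0, so Bolzano's
# theorem guarantees a root in [-2, 5]; both calls above converge to the
# positive root sqrt(10) ≈ 3.162 within the loop's 0.01 tolerance.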
| 19 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES)} )
    data_dir: str = field(
        default=None, metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
    max_seq_length: int = field(
        default=128, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    doc_stride: int = field(
        default=128, metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
    max_query_length: int = field(
        default=64, metadata={
            'help': (
                'The maximum number of tokens for the question. Questions longer than this will '
                'be truncated to this length.'
            )
        } , )
    max_answer_length: int = field(
        default=30, metadata={
            'help': (
                'The maximum length of an answer that can be generated. This is needed because the start '
                'and end predictions are not conditioned on one another.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    version_2_with_negative: bool = field(
        default=False, metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    n_best_size: int = field(
        default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    lang_id: int = field(
        default=0 , metadata={
            'help': (
                'language id of input for language-specific xlm models (see'
                ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
            )
        } , )
    threads: int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class Split( Enum ):
    train = 'train'
    dev = 'dev'
class SquadDataset( Dataset ):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self , args , tokenizer , limit_length = None , mode = Split.train , is_language_sensitive = False , cache_dir = None , dataset_format = "pt" , ) -> None:
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}',
        )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f'Loading features from cached file {cached_features_file} [took %.3f s]', time.time() - start)
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
                        " future run")
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)
                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples}, cached_features_file, )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ) -> Tuple:
return len(self.features )
    def __getitem__( self , i ) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )
        inputs = {
"input_ids": input_ids,
"attention_mask": attention_mask,
"token_type_ids": token_type_ids,
}
if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
del inputs["token_type_ids"]
if self.args.model_type in ["xlnet", "xlm"]:
inputs.update({"cls_index": cls_index, "p_mask": p_mask} )
if self.args.version_2_with_negative:
inputs.update({"is_impossible": is_impossible} )
if self.is_language_sensitive:
inputs.update({"langs": (torch.ones(input_ids.shape , dtype=torch.intaa ) * self.args.lang_id)} )
if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
inputs.update({"start_positions": start_positions, "end_positions": end_positions} )
return inputs
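# Hypothetical wiring of the pieces above (argument values are illustrative):
#   args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad")
#   dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   batch = dataset[0]  # dict with input_ids / attention_mask / token_type_ids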
| 19 | 1 |
import math
import time
from transformers import Trainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class __lowerCAmelCase ( Trainer ):
    def __init__( self , *args , eval_examples=None , post_process_function=None , **kwargs ) -> None:
        '''simple docstring'''
        super().__init__(*args , **kwargs )
        self.eval_examples = eval_examples
        self.post_process_function = post_process_function
def _lowercase ( self , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__=None , lowerCAmelCase__ = "eval" ) -> Tuple:
'''simple docstring'''
a__ : List[Any] =self.eval_dataset if eval_dataset is None else eval_dataset
a__ : Dict =self.get_eval_dataloader(UpperCAmelCase__ )
a__ : int =self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
a__ : str =self.compute_metrics
a__ : List[str] =None
a__ : Union[str, Any] =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
a__ : Optional[int] =time.time()
try:
a__ : Any =eval_loop(
UpperCAmelCase__ , description="Evaluation" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , metric_key_prefix=UpperCAmelCase__ , )
finally:
a__ : Dict =compute_metrics
a__ : int =self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
UpperCAmelCase__ , UpperCAmelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
a__ : Any =self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , output.predictions )
a__ : Union[str, Any] =self.compute_metrics(UpperCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
a__ : Union[str, Any] =metrics.pop(UpperCAmelCase__ )
metrics.update(output.metrics )
else:
a__ : List[str] =output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(UpperCAmelCase__ )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
a__ : str =self.callback_handler.on_evaluate(self.args , self.state , self.control , UpperCAmelCase__ )
return metrics
def _lowercase ( self , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__=None , lowerCAmelCase__ = "test" ) -> Optional[Any]:
'''simple docstring'''
a__ : Any =self.get_test_dataloader(UpperCAmelCase__ )
# Temporarily disable metric computation, we will do it in the loop here.
a__ : Union[str, Any] =self.compute_metrics
a__ : List[Any] =None
a__ : Union[str, Any] =self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
a__ : str =time.time()
try:
a__ : int =eval_loop(
UpperCAmelCase__ , description="Prediction" , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=UpperCAmelCase__ , metric_key_prefix=UpperCAmelCase__ , )
finally:
a__ : str =compute_metrics
a__ : Optional[Any] =self.args.eval_batch_size * self.args.world_size
if F'''{metric_key_prefix}_jit_compilation_time''' in output.metrics:
start_time += output.metrics[F'''{metric_key_prefix}_jit_compilation_time''']
output.metrics.update(
speed_metrics(
UpperCAmelCase__ , UpperCAmelCase__ , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
a__ : Tuple =self.post_process_function(UpperCAmelCase__ , UpperCAmelCase__ , output.predictions , "predict" )
a__ : List[Any] =self.compute_metrics(UpperCAmelCase__ )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(F'''{metric_key_prefix}_''' ):
a__ : str =metrics.pop(UpperCAmelCase__ )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=UpperCAmelCase__ )
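# Sketch of how the trainer subclass above is typically driven (upstream it is
# called `QuestionAnsweringTrainer`; the surrounding objects are assumed to
# exist):
#   trainer = <trainer class above>(model=model, args=training_args,
#                                   eval_dataset=eval_dataset,
#                                   eval_examples=eval_examples,
#                                   post_process_function=post_processing_function)
#   metrics = trainer.evaluate()  # metric keys prefixed with "eval_"
#   output = trainer.predict(predict_dataset, predict_examples)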
| 361 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """simple docstring"""
    return price * (1 + tax_rate)
if __name__ == "__main__":
print(F"""{price_plus_tax(100, 0.2_5) = }""")
print(F"""{price_plus_tax(1_2_5.5_0, 0.0_5) = }""")
| 148 | 0 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class __lowerCAmelCase (unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        '''simple docstring'''
        self.checkpoint = 'ylacombe/bark-small'
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = 'en_speaker_1'
        self.input_string = 'This is a test string'
        self.speaker_embeddings_dict_path = 'speaker_embeddings_path.json'
        self.speaker_embeddings_directory = 'speaker_embeddings'

    def get_tokenizer(self, **kwargs):
        '''simple docstring'''
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname)
def UpperCamelCase__ (self : List[Any] ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = BarkProcessor(tokenizer=UpperCamelCase )
processor.save_pretrained(self.tmpdirname )
lowercase__ = BarkProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
@slow
def UpperCamelCase__ (self : Union[str, Any] ):
'''simple docstring'''
lowercase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
processor.save_pretrained(
self.tmpdirname , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , speaker_embeddings_directory=self.speaker_embeddings_directory , )
lowercase__ = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
lowercase__ = BarkProcessor.from_pretrained(
self.tmpdirname , self.speaker_embeddings_dict_path , bos_token='''(BOS)''' , eos_token='''(EOS)''' , )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
def UpperCamelCase__ (self : Optional[int] ):
'''simple docstring'''
lowercase__ = BarkProcessor.from_pretrained(
pretrained_processor_name_or_path=self.checkpoint , speaker_embeddings_dict_path=self.speaker_embeddings_dict_path , )
lowercase__ = 35
lowercase__ = 2
lowercase__ = 8
lowercase__ = {
'''semantic_prompt''': np.ones(UpperCamelCase ),
'''coarse_prompt''': np.ones((nb_codebooks_coarse, seq_len) ),
'''fine_prompt''': np.ones((nb_codebooks_total, seq_len) ),
}
# test providing already loaded voice_preset
lowercase__ = processor(text=self.input_string , voice_preset=UpperCamelCase )
lowercase__ = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase , np.array([] ) ).tolist() )
# test loading voice preset from npz file
lowercase__ = os.path.join(self.tmpdirname , '''file.npz''' )
np.savez(UpperCamelCase , **UpperCamelCase )
lowercase__ = processor(text=self.input_string , voice_preset=UpperCamelCase )
lowercase__ = inputs['''history_prompt''']
for key in voice_preset:
self.assertListEqual(voice_preset[key].tolist() , processed_voice_preset.get(UpperCamelCase , np.array([] ) ).tolist() )
# test loading voice preset from the hub
lowercase__ = processor(text=self.input_string , voice_preset=self.voice_preset )
def UpperCamelCase__ (self : Any ):
'''simple docstring'''
lowercase__ = self.get_tokenizer()
lowercase__ = BarkProcessor(tokenizer=UpperCamelCase )
lowercase__ = processor(text=self.input_string )
lowercase__ = tokenizer(
self.input_string , padding='''max_length''' , max_length=256 , add_special_tokens=UpperCamelCase , return_attention_mask=UpperCamelCase , return_token_type_ids=UpperCamelCase , )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key].squeeze().tolist() )
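if __name__ == "__main__":
    # Minimal end-to-end sketch of the processor under test (uses the same
    # checkpoint as the tests above; requires network access to the Hub).
    processor = BarkProcessor.from_pretrained("ylacombe/bark-small")
    inputs = processor(text="This is a test string")
    print(inputs["input_ids"].shape)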
| 2 | """simple docstring"""
UNIVERSAL_GAS_CONSTANT = 8.3144598


def rms_speed_of_molecule(temperature: float, molar_mass: float) -> float:
    '''simple docstring'''
    if temperature < 0:
        raise Exception('Temperature cannot be less than 0 K')
    if molar_mass <= 0:
        raise Exception('Molar mass cannot be less than or equal to 0 kg/mol')
    else:
        return (3 * UNIVERSAL_GAS_CONSTANT * temperature / molar_mass) ** 0.5
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# example
    temperature = 300  # kelvin
    molar_mass = 0.028  # molar mass of N2 in kg/mol (the guard above expects kg/mol)
    vrms = rms_speed_of_molecule(temperature, molar_mass)
print(f'''Vrms of Nitrogen gas at 300 K is {vrms} m/s''')
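# Formula check: v_rms = sqrt(3 * R * T / M)
#              = sqrt(3 * 8.3144598 * 300 / 0.028) ≈ 517 m/s for N2 at 300 K.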
| 172 | 0 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
a :Tuple = logging.get_logger(__name__)
def _lowercase ( __lowerCAmelCase ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Any = """huggingface/label-files"""
SCREAMING_SNAKE_CASE__ : Any = """imagenet-1k-id2label.json"""
SCREAMING_SNAKE_CASE__ : List[str] = json.load(open(hf_hub_download(__lowerCAmelCase , __lowerCAmelCase , repo_type="""dataset""" ) , """r""" ) )
SCREAMING_SNAKE_CASE__ : Tuple = {int(__lowerCAmelCase ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : List[str] = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Any = """std_conv""" if """bit""" in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
    SCREAMING_SNAKE_CASE__ : Any = BitConfig(
        conv_layer=__lowerCAmelCase , num_labels=1000 , id2label=__lowerCAmelCase , label2id=__lowerCAmelCase , )
return config
def _lowercase ( __lowerCAmelCase ) -> Tuple:
if "stem.conv" in name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = name.replace("""stem.conv""" , """bit.embedder.convolution""" )
if "blocks" in name:
SCREAMING_SNAKE_CASE__ : Optional[Any] = name.replace("""blocks""" , """layers""" )
if "head.fc" in name:
SCREAMING_SNAKE_CASE__ : List[str] = name.replace("""head.fc""" , """classifier.1""" )
if name.startswith("""norm""" ):
SCREAMING_SNAKE_CASE__ : Optional[int] = """bit.""" + name
if "bit" not in name and "classifier" not in name:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = """bit.encoder.""" + name
return name
def _lowercase ( ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Tuple = """http://images.cocodataset.org/val2017/000000039769.jpg"""
SCREAMING_SNAKE_CASE__ : Dict = Image.open(requests.get(__lowerCAmelCase , stream=__lowerCAmelCase ).raw )
return im
@torch.no_grad()
def _lowercase ( __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase=False ) -> Any:
SCREAMING_SNAKE_CASE__ : Dict = get_config(__lowerCAmelCase )
# load original model from timm
SCREAMING_SNAKE_CASE__ : str = create_model(__lowerCAmelCase , pretrained=__lowerCAmelCase )
timm_model.eval()
# load state_dict of original model
SCREAMING_SNAKE_CASE__ : Optional[int] = timm_model.state_dict()
for key in state_dict.copy().keys():
SCREAMING_SNAKE_CASE__ : Optional[int] = state_dict.pop(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = val.squeeze() if """head""" in key else val
# load HuggingFace model
SCREAMING_SNAKE_CASE__ : List[Any] = BitForImageClassification(__lowerCAmelCase )
model.eval()
model.load_state_dict(__lowerCAmelCase )
# create image processor
SCREAMING_SNAKE_CASE__ : List[Any] = create_transform(**resolve_data_config({} , model=__lowerCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Tuple = transform.transforms
SCREAMING_SNAKE_CASE__ : List[Any] = {
"""bilinear""": PILImageResampling.BILINEAR,
"""bicubic""": PILImageResampling.BICUBIC,
"""nearest""": PILImageResampling.NEAREST,
}
SCREAMING_SNAKE_CASE__ : Optional[int] = BitImageProcessor(
do_resize=__lowerCAmelCase , size={"""shortest_edge""": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=__lowerCAmelCase , crop_size={"""height""": timm_transforms[1].size[0], """width""": timm_transforms[1].size[1]} , do_normalize=__lowerCAmelCase , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE__ : int = transform(__lowerCAmelCase ).unsqueeze(0 )
SCREAMING_SNAKE_CASE__ : Tuple = processor(__lowerCAmelCase , return_tensors="""pt""" ).pixel_values
# verify pixel values
assert torch.allclose(__lowerCAmelCase , __lowerCAmelCase )
# verify logits
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.logits
print("""Logits:""" , logits[0, :3] )
print("""Predicted class:""" , model.config.idalabel[logits.argmax(-1 ).item()] )
SCREAMING_SNAKE_CASE__ : List[str] = timm_model(__lowerCAmelCase )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(__lowerCAmelCase , outputs.logits , atol=1E-3 )
print("""Looks ok!""" )
if pytorch_dump_folder_path is not None:
Path(__lowerCAmelCase ).mkdir(exist_ok=__lowerCAmelCase )
print(F'''Saving model {model_name} and processor to {pytorch_dump_folder_path}''' )
model.save_pretrained(__lowerCAmelCase )
processor.save_pretrained(__lowerCAmelCase )
if push_to_hub:
print(F'''Pushing model {model_name} and processor to the hub''' )
model.push_to_hub(F'''ybelkada/{model_name}''' )
processor.push_to_hub(F'''ybelkada/{model_name}''' )
if __name__ == "__main__":
a :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="resnetv2_50x1_bitm",
type=str,
help="Name of the BiT timm model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model to the hub.",
)
a :Optional[Any] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
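# Example invocation (assuming the script is saved as
# convert_bit_to_pytorch.py; the flags are the ones defined by the parser above):
#   python convert_bit_to_pytorch.py --model_name resnetv2_50x1_bitm \
#       --pytorch_dump_folder_path ./bit-50 --push_to_hub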
| 363 |
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
if __name__ == "__main__":
import doctest
doctest.testmod()
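# Quick demonstration of the helpers above (values verified by hand):
# set_bit(0b1100, 1)    -> 14 (0b1110)
# clear_bit(0b1110, 1)  -> 12 (0b1100)
# flip_bit(0b1100, 2)   -> 8  (0b1000)
# is_bit_set(0b1010, 3) -> True
# get_bit(0b1010, 1)    -> 1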
| 56 | 0 |
'''simple docstring'''
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
)
_overwrite_items = [
_set('key_a', 'val_a'),
_set('key_a', 'val_b'),
]
_delete_items = [
_set('key_a', 'val_a'),
_set('key_b', 'val_b'),
_del('key_a'),
_del('key_b'),
_set('key_a', 'val_a'),
_del('key_a'),
]
_access_absent_items = [
_get('key_a'),
_del('key_a'),
_set('key_a', 'val_a'),
_del('key_a'),
_del('key_a'),
_get('key_a'),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
"""operations""" , (
pytest.param(_add_items , id="""add items""" ),
pytest.param(_overwrite_items , id="""overwrite items""" ),
pytest.param(_delete_items , id="""delete items""" ),
pytest.param(_access_absent_items , id="""access absent items""" ),
pytest.param(_add_with_resize_up , id="""add with resize up""" ),
pytest.param(_add_with_resize_down , id="""add with resize down""" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
| 321 |
'''simple docstring'''
import datasets
from .evaluate import evaluate
_CITATION = '\\n@article{hendrycks2021cuad,\n title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},\n author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},\n journal={arXiv preprint arXiv:2103.06268},\n year={2021}\n}\n'
_DESCRIPTION = '\nThis metric wraps the official scoring script for version 1 of the Contract\nUnderstanding Atticus Dataset (CUAD).\nContract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510\ncommercial legal contracts that have been manually labeled to identify 41 categories of important\nclauses that lawyers look for when reviewing contracts in connection with corporate transactions.\n'
_KWARGS_DESCRIPTION = '\nComputes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).\nArgs:\n    predictions: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair as given in the references (see below)\n        - \'prediction_text\': list of possible texts for the answer, as a list of strings\n        depending on a threshold on the confidence probability of each prediction.\n    references: List of question-answers dictionaries with the following key-values:\n        - \'id\': id of the question-answer pair (see above),\n        - \'answers\': a Dict in the CUAD dataset format\n            {\n                \'text\': list of possible texts for the answer, as a list of strings\n                \'answer_start\': list of start positions for the answer, as a list of ints\n            }\n        Note that answer_start values are not taken into account to compute the metric.\nReturns:\n    \'exact_match\': Exact match (the normalized answer exactly match the gold answer)\n    \'f1\': The F-score of predicted tokens versus the gold answer\n    \'aupr\': Area Under the Precision-Recall curve\n    \'prec_at_80_recall\': Precision at 80% recall\n    \'prec_at_90_recall\': Precision at 90% recall\nExamples:\n    >>> predictions = [{\'prediction_text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\'], \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n    >>> references = [{\'answers\': {\'answer_start\': [143, 49], \'text\': [\'The seller:\', \'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.\']}, \'id\': \'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties\'}]\n    >>> cuad_metric = datasets.load_metric("cuad")\n    >>> results = cuad_metric.compute(predictions=predictions, references=references)\n    >>> print(results)\n    {\'exact_match\': 100.0, \'f1\': 100.0, \'aupr\': 0.0, \'prec_at_80_recall\': 1.0, \'prec_at_90_recall\': 1.0}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": {
"""id""": datasets.Value("""string""" ),
"""prediction_text""": datasets.features.Sequence(datasets.Value("""string""" ) ),
},
"""references""": {
"""id""": datasets.Value("""string""" ),
"""answers""": datasets.features.Sequence(
{
"""text""": datasets.Value("""string""" ),
"""answer_start""": datasets.Value("""int32""" ),
} ),
},
} ) , codebase_urls=["""https://www.atticusprojectai.org/cuad"""] , reference_urls=["""https://www.atticusprojectai.org/cuad"""] , )
    def _compute( self , predictions , references ):
        """simple docstring"""
        pred_dict = {prediction["id"]: prediction["prediction_text"] for prediction in predictions}
        dataset = [
            {
                "paragraphs": [
                    {
                        "qas": [
                            {
                                "answers": [{"text": answer_text} for answer_text in ref["answers"]["text"]],
                                "id": ref["id"],
                            }
                            for ref in references
                        ]
                    }
                ]
            }
        ]
        score = evaluate(dataset=dataset , predictions=pred_dict )
        return score
| 321 | 1 |
'''simple docstring'''
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
_lowercase : List[Any] = logging.get_logger(__name__)
@add_end_docstrings(
a__ , R"\n top_k (`int`, defaults to 5):\n The number of predictions to return.\n targets (`str` or `List[str]`, *optional*):\n When passed, the model will limit the scores to the passed targets instead of looking up in the whole\n vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting\n token will be used (with a warning, and that might be slower).\n\n " , )
class UpperCamelCase__( a__ ):
def a__( self : Any , lowerCAmelCase : str )-> np.ndarray:
"""simple docstring"""
if self.framework == "tf":
UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()
elif self.framework == "pt":
UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCAmelCase__ )
else:
raise ValueError('''Unsupported framework''' )
return masked_index
def a__( self : str , lowerCAmelCase : str )-> np.ndarray:
"""simple docstring"""
UpperCAmelCase = self.get_masked_index(lowerCAmelCase__ )
UpperCAmelCase = np.prod(masked_index.shape )
if numel < 1:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , F"""No mask_token ({self.tokenizer.mask_token}) found on the input""" , )
def a__( self : str , lowerCAmelCase : Dict )-> Dict:
"""simple docstring"""
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
for model_input in model_inputs:
self._ensure_exactly_one_mask_token(model_input['''input_ids'''][0] )
else:
for input_ids in model_inputs["input_ids"]:
self._ensure_exactly_one_mask_token(lowerCAmelCase__ )
def a__( self : Union[str, Any] , lowerCAmelCase : Dict , lowerCAmelCase : str=None , **lowerCAmelCase : List[Any] )-> Dict[str, GenericTensor]:
"""simple docstring"""
if return_tensors is None:
UpperCAmelCase = self.framework
UpperCAmelCase = self.tokenizer(lowerCAmelCase__ , return_tensors=lowerCAmelCase__ )
self.ensure_exactly_one_mask_token(lowerCAmelCase__ )
return model_inputs
def a__( self : Optional[int] , lowerCAmelCase : Union[str, Any] )-> int:
"""simple docstring"""
UpperCAmelCase = self.model(**lowerCAmelCase__ )
UpperCAmelCase = model_inputs["input_ids"]
return model_outputs
def a__( self : str , lowerCAmelCase : str , lowerCAmelCase : Optional[int]=5 , lowerCAmelCase : Any=None )-> Union[str, Any]:
"""simple docstring"""
if target_ids is not None and target_ids.shape[0] < top_k:
UpperCAmelCase = target_ids.shape[0]
UpperCAmelCase = model_outputs["input_ids"][0]
UpperCAmelCase = model_outputs["logits"]
if self.framework == "tf":
UpperCAmelCase = tf.where(input_ids == self.tokenizer.mask_token_id ).numpy()[:, 0]
UpperCAmelCase = outputs.numpy()
UpperCAmelCase = outputs[0, masked_index, :]
UpperCAmelCase = stable_softmax(lowerCAmelCase__ , axis=-1 )
if target_ids is not None:
UpperCAmelCase = tf.gather_nd(tf.squeeze(lowerCAmelCase__ , 0 ) , target_ids.reshape(-1 , 1 ) )
UpperCAmelCase = tf.expand_dims(lowerCAmelCase__ , 0 )
UpperCAmelCase = tf.math.top_k(lowerCAmelCase__ , k=lowerCAmelCase__ )
UpperCAmelCase = topk.values.numpy(), topk.indices.numpy()
else:
UpperCAmelCase = torch.nonzero(input_ids == self.tokenizer.mask_token_id , as_tuple=lowerCAmelCase__ ).squeeze(-1 )
# Fill mask pipeline supports only one ${mask_token} per sample
UpperCAmelCase = outputs[0, masked_index, :]
UpperCAmelCase = logits.softmax(dim=-1 )
if target_ids is not None:
UpperCAmelCase = probs[..., target_ids]
UpperCAmelCase = probs.topk(lowerCAmelCase__ )
UpperCAmelCase = []
UpperCAmelCase = values.shape[0] == 1
for i, (_values, _predictions) in enumerate(zip(values.tolist() , predictions.tolist() ) ):
UpperCAmelCase = []
for v, p in zip(_values , _predictions ):
# Copy is important since we're going to modify this array in place
UpperCAmelCase = input_ids.numpy().copy()
if target_ids is not None:
UpperCAmelCase = target_ids[p].tolist()
UpperCAmelCase = p
# Filter padding out:
UpperCAmelCase = tokens[np.where(tokens != self.tokenizer.pad_token_id )]
# Originally we skip special tokens to give readable output.
# For multi masks though, the other [MASK] would be removed otherwise
# making the output look odd, so we add them back
UpperCAmelCase = self.tokenizer.decode(lowerCAmelCase__ , skip_special_tokens=lowerCAmelCase__ )
UpperCAmelCase = {"score": v, "token": p, "token_str": self.tokenizer.decode([p] ), "sequence": sequence}
row.append(lowerCAmelCase__ )
result.append(lowerCAmelCase__ )
if single_mask:
return result[0]
return result
def a__( self : Optional[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Any=None )-> Dict:
"""simple docstring"""
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
UpperCAmelCase = [targets]
try:
UpperCAmelCase = self.tokenizer.get_vocab()
except Exception:
UpperCAmelCase = {}
UpperCAmelCase = []
for target in targets:
UpperCAmelCase = vocab.get(lowerCAmelCase__ , lowerCAmelCase__ )
if id_ is None:
UpperCAmelCase = self.tokenizer(
lowerCAmelCase__ , add_special_tokens=lowerCAmelCase__ , return_attention_mask=lowerCAmelCase__ , return_token_type_ids=lowerCAmelCase__ , max_length=1 , truncation=lowerCAmelCase__ , )["input_ids"]
if len(lowerCAmelCase__ ) == 0:
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
'''We cannot replace it with anything meaningful, ignoring it''' )
continue
UpperCAmelCase = input_ids[0]
# XXX: If users encounter this pass
# it becomes pretty slow, so let's make sure
# The warning enables them to fix the input to
# get faster performance.
logger.warning(
F"""The specified target token `{target}` does not exist in the model vocabulary. """
F"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`.""" )
target_ids.append(id_ )
UpperCAmelCase = list(set(lowerCAmelCase__ ) )
if len(lowerCAmelCase__ ) == 0:
raise ValueError('''At least one target must be provided when passed.''' )
UpperCAmelCase = np.array(lowerCAmelCase__ )
return target_ids
def a__( self : Optional[Any] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : int=None )-> int:
"""simple docstring"""
UpperCAmelCase = {}
if targets is not None:
UpperCAmelCase = self.get_target_ids(lowerCAmelCase__ , lowerCAmelCase__ )
UpperCAmelCase = target_ids
if top_k is not None:
UpperCAmelCase = top_k
if self.tokenizer.mask_token_id is None:
raise PipelineException(
'''fill-mask''' , self.model.base_model_prefix , '''The tokenizer does not define a `mask_token`.''' )
return {}, {}, postprocess_params
def __call__( self : Dict , lowerCAmelCase : List[str] , *lowerCAmelCase : Optional[int] , **lowerCAmelCase : Optional[Any] )-> str:
"""simple docstring"""
UpperCAmelCase = super().__call__(lowerCAmelCase__ , **lowerCAmelCase__ )
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and len(lowerCAmelCase__ ) == 1:
return outputs[0]
return outputs
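if __name__ == "__main__":
    # Usage sketch for the pipeline implemented above: the standard `fill-mask`
    # task (the checkpoint name is an assumption for this sketch and requires a
    # model download).
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilroberta-base")
    print(unmasker("The capital of France is <mask>.", top_k=2, targets=["Paris", "London"]))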
| 363 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def lowerCamelCase__ ( A : str="" ):
'''simple docstring'''
UpperCAmelCase = tempfile.mkdtemp()
return os.path.join(A , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : int )-> int:
"""simple docstring"""
UpperCAmelCase = torch.rand(12 , dtype=torch.floataa ) - 0.5
UpperCAmelCase = AgentAudio(lowerCAmelCase )
UpperCAmelCase = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCAmelCase , agent_type.to_raw() , atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCAmelCase ) )
# Ensure that the file contains the same value as the original tensor
UpperCAmelCase , UpperCAmelCase = sf.read(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , torch.tensor(lowerCAmelCase ) , atol=1E-4 ) )
def a__( self : Union[str, Any] )-> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = torch.rand(12 , dtype=torch.floataa ) - 0.5
UpperCAmelCase = get_new_path(suffix='''.wav''' )
sf.write(lowerCAmelCase , lowerCAmelCase , 16000 )
UpperCAmelCase = AgentAudio(lowerCAmelCase )
self.assertTrue(torch.allclose(lowerCAmelCase , agent_type.to_raw() , atol=1E-4 ) )
self.assertEqual(agent_type.to_string() , lowerCAmelCase )
@require_vision
@require_torch
class UpperCamelCase__( unittest.TestCase ):
def a__( self : List[Any] )-> Any:
"""simple docstring"""
UpperCAmelCase = torch.randint(0 , 256 , (64, 64, 3) )
UpperCAmelCase = AgentImage(lowerCAmelCase )
UpperCAmelCase = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCAmelCase , agent_type._tensor , atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase ) )
def a__( self : List[Any] )-> List[Any]:
"""simple docstring"""
UpperCAmelCase = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
UpperCAmelCase = Image.open(lowerCAmelCase )
UpperCAmelCase = AgentImage(lowerCAmelCase )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase ) )
def a__( self : Optional[Any] )-> List[str]:
"""simple docstring"""
UpperCAmelCase = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
UpperCAmelCase = Image.open(lowerCAmelCase )
UpperCAmelCase = AgentImage(lowerCAmelCase )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCAmelCase ) )
class UpperCamelCase__( unittest.TestCase ):
def a__( self : int )-> Any:
"""simple docstring"""
UpperCAmelCase = '''Hey!'''
UpperCAmelCase = AgentText(lowerCAmelCase )
self.assertEqual(lowerCAmelCase , agent_type.to_string() )
self.assertEqual(lowerCAmelCase , agent_type.to_raw() )
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
| 91 | 0 |
import json
import multiprocessing as mp
import re
from collections import defaultdict
from functools import partial
from typing import Dict, List, Optional, Set, Tuple, Type
from datasets import Dataset
from datasketch import MinHash, MinHashLSH
from dpu_utils.utils.iterators import ThreadedIterator
from tqdm import tqdm
_snake_case = re.compile("[^A-Za-z_0-9]")
# parameters used in DuplicationIndex
_snake_case = 10
_snake_case = 256
def get_min_hash(tokens):
    if len(tokens) < MIN_NUM_TOKENS:
        return None
    min_hash = MinHash(num_perm=NUM_PERM)
    for token in set(tokens):
        min_hash.update(token.encode())
    return min_hash


def get_tokens(code):
    return {t for t in NON_ALPHA.split(code) if len(t.strip()) > 0}
class DuplicationIndex:
    def __init__(self, *, duplication_jaccard_threshold: float = 0.85) -> None:
        self._duplication_jaccard_threshold = duplication_jaccard_threshold
        self._num_perm = NUM_PERM
        self._index = MinHashLSH(threshold=self._duplication_jaccard_threshold, num_perm=self._num_perm)
        self._duplicate_clusters = defaultdict(set)
    def add(self, code_key, min_hash) -> None:
        close_duplicates = self._index.query(min_hash)
        if code_key in self._index.keys:
            print(f"Duplicate key {code_key}")
            return
        self._index.insert(code_key, min_hash)
        if len(close_duplicates) > 0:
            for base_duplicate in close_duplicates:
                if base_duplicate in self._duplicate_clusters:
                    self._duplicate_clusters[base_duplicate].add(code_key)
                    break
            else:
                self._duplicate_clusters[close_duplicates[0]].add(code_key)
    def get_duplicate_clusters(self) -> List[List[Dict]]:
        duplicate_clusters = []
        for base, duplicates in self._duplicate_clusters.items():
            cluster = [base] + list(duplicates)
            # reformat the cluster to be a list of dict
            cluster = [{"base_index": el[0], "repo_name": el[1], "path": el[2]} for el in cluster]
            duplicate_clusters.append(cluster)
        return duplicate_clusters
    def save(self, filepath) -> None:
        duplicate_clusters = self.get_duplicate_clusters()
        with open(filepath, "w") as f:
            json.dump(duplicate_clusters, f)
def _compute_min_hash(element):
    index, data = element
    min_hash = get_min_hash([t for t in NON_ALPHA.split(data["content"]) if len(t.strip()) > 0])
    if min_hash is not None:
        return (index, data["repo_name"], data["path"]), min_hash
def minhash_iter(dataset_iterator):
    with mp.Pool() as pool:
        for data in pool.imap_unordered(
            _compute_min_hash, ThreadedIterator(dataset_iterator, max_queue_size=10000), chunksize=100
        ):
            if data is not None:
                yield data
def make_duplicate_clusters(dataset_iterator, jaccard_threshold):
    di = DuplicationIndex(duplication_jaccard_threshold=jaccard_threshold)
    for filename, min_hash in tqdm(ThreadedIterator(minhash_iter(enumerate(dataset_iterator)), max_queue_size=100)):
        di.add(filename, min_hash)
    # Returns a List[Cluster] where Cluster is List[str] with the filenames.
    return di.get_duplicate_clusters()
def jaccard_similarity(code1, code2):
    tokens1 = get_tokens(code1)
    tokens2 = get_tokens(code2)
    return len(tokens1 & tokens2) / len(tokens1 | tokens2)
_shared_dataset = None
def _find_cluster_extremes_shared(cluster, jaccard_threshold):
    extremes = []
    for element1 in cluster:
        code1 = _shared_dataset[element1["base_index"]]["content"]
        for element2 in extremes:
            code2 = _shared_dataset[element2["base_index"]]["content"]
            if jaccard_similarity(code1, code2) >= jaccard_threshold:
                element2["copies"] += 1
                break
        else:
            element1["copies"] = 1
            extremes.append(element1)
    return extremes
def find_extremes(cluster_list, dataset, jaccard_threshold):
    global _shared_dataset
    _shared_dataset = dataset
    extremes_list = []
    f = partial(_find_cluster_extremes_shared, jaccard_threshold=jaccard_threshold)
    with mp.Pool() as pool:
        for extremes in tqdm(
            pool.imap_unordered(f, cluster_list),
            total=len(cluster_list),
        ):
            extremes_list.append(extremes)
    return extremes_list
def deduplicate_dataset(dataset, jaccard_threshold=0.85):
    duplicate_clusters = make_duplicate_clusters(dataset, jaccard_threshold)
    duplicate_indices = {x["base_index"] for cluster in duplicate_clusters for x in cluster}
    extreme_dict = {}
    extremes_clusters = find_extremes(duplicate_clusters, dataset, jaccard_threshold)
    for extremes in extremes_clusters:
        for element in extremes:
            extreme_dict[element["base_index"]] = element
    remove_indices = duplicate_indices - set(extreme_dict.keys())
    ds_filter = dataset.filter(lambda x, idx: idx not in remove_indices, with_indices=True)
    # update duplicate_clusters
    for cluster in duplicate_clusters:
        for element in cluster:
            element["is_extreme"] = element["base_index"] in extreme_dict
            if element["is_extreme"]:
                element["copies"] = extreme_dict[element["base_index"]]["copies"]
    print(f"Original dataset size: {len(dataset)}")
    print(f"Number of duplicate clusters: {len(duplicate_clusters)}")
    print(f"Files in duplicate cluster: {len(duplicate_indices)}")
    print(f"Unique files in duplicate cluster: {len(extreme_dict)}")
    print(f"Filtered dataset size: {len(ds_filter)}")
    return ds_filter, duplicate_clusters
| 26 |
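For intuition, here is a small standalone example of the datasketch primitives the deduplication script builds on: a MinHash sketch estimates the Jaccard similarity between token sets, and the LSH index retrieves previously inserted keys whose estimated similarity clears the threshold (the strings and threshold below are illustrative):

from datasketch import MinHash, MinHashLSH

def sketch(code: str, num_perm: int = 256) -> MinHash:
    m = MinHash(num_perm=num_perm)
    for token in set(code.split()):
        m.update(token.encode())
    return m

a = sketch("def add(a, b): return a + b")
b = sketch("def add(x, y): return x + y")
print(a.jaccard(b))  # estimated Jaccard similarity of the two token sets

lsh = MinHashLSH(threshold=0.5, num_perm=256)
lsh.insert("snippet_a", a)
print(lsh.query(b))  # ['snippet_a'] if the estimate clears the threshold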
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class DiTPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
        "latents",
        "num_images_per_prompt",
        "callback",
        "callback_steps",
    }
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
__UpperCAmelCase : Optional[int] = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16, num_layers=2, patch_size=4, attention_head_dim=8, num_attention_heads=2, in_channels=4, out_channels=8, attention_bias=True, activation_fn="gelu-approximate", num_embeds_ada_norm=1000, norm_type="ada_norm_zero", norm_elementwise_affine=False, )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape, (1, 16, 16, 3))
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457])
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(relax_max_difference=True, expected_max_diff=1e-3)
    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(), reason="XFormers attention is only available with CUDA and `xformers` installed", )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    """simple docstring"""
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_dit_256(self):
        generator = torch.manual_seed(0)
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
        pipe.to("cuda")
        words = ["vase", "umbrella", "white shark", "white wolf"]
        ids = pipe.get_label_ids(words)
        images = pipe(ids, generator=generator, num_inference_steps=40, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy")
            assert np.abs((expected_image - image).max()) < 1e-2
    def test_dit_512(self):
        pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-512")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.to("cuda")
        words = ["vase", "umbrella"]
        ids = pipe.get_label_ids(words)
        generator = torch.manual_seed(0)
        images = pipe(ids, generator=generator, num_inference_steps=25, output_type="np").images
        for word, image in zip(words, images):
            expected_image = load_numpy(
                "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
                f"/dit/{word}_512.npy")
            assert np.abs((expected_image - image).max()) < 1e-1
| 123 | 0 |
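Outside the test harness, the same pipeline is driven like this (a sketch following the slow tests above; the checkpoint name comes from those tests, everything else is standard diffusers usage):

import torch
from diffusers import DiTPipeline, DPMSolverMultistepScheduler

pipe = DiTPipeline.from_pretrained("facebook/DiT-XL-2-256")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.to("cuda")

class_ids = pipe.get_label_ids(["vase", "umbrella"])  # map ImageNet labels to class ids
generator = torch.manual_seed(0)                      # fix the seed for reproducible samples
images = pipe(class_ids, generator=generator, num_inference_steps=25, output_type="np").images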
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class SegformerConfigTester(ConfigTester):
"""simple docstring"""
    def create_and_test_config_common_properties(self):
        """simple docstring"""
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
        self.parent.assertTrue(hasattr(config, "num_encoder_blocks"))
class SegformerModelTester:
"""simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=64, num_channels=3, num_encoder_blocks=4, depths=[2, 2, 2, 2], sr_ratios=[8, 4, 2, 1], hidden_sizes=[16, 32, 64, 128], downsampling_rates=[1, 4, 8, 16], num_attention_heads=[1, 2, 4, 8], is_training=True, use_labels=True, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_encoder_blocks = num_encoder_blocks
        self.sr_ratios = sr_ratios
        self.depths = depths
        self.hidden_sizes = hidden_sizes
        self.downsampling_rates = downsampling_rates
        self.num_attention_heads = num_attention_heads
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
"""simple docstring"""
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = SegformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_height = expected_width = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width))
def A ( self : List[str] , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : str , UpperCamelCase__ : Tuple ):
"""simple docstring"""
UpperCamelCase = self.num_labels
UpperCamelCase = SegformerForSemanticSegmentation(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCamelCase = model(lowerCAmelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
UpperCamelCase = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def A ( self : List[str] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : Dict , UpperCamelCase__ : int ):
"""simple docstring"""
UpperCamelCase = 1
UpperCamelCase = SegformerForSemanticSegmentation(config=lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
UpperCamelCase = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(lowerCAmelCase__ )
UpperCamelCase = model(lowerCAmelCase__ , labels=lowerCAmelCase__ )
self.parent.assertGreater(result.loss , 0.0 )
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SegformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": SegformerModel,
            "image-classification": SegformerForImageClassification,
            "image-segmentation": SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    fx_compatible = True
    test_head_masking = False
    test_pruning = False
    test_resize_embeddings = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = SegformerModelTester(self)
        self.config_tester = SegformerConfigTester(self, config_class=SegformerConfig)
def A ( self : Optional[int] ):
"""simple docstring"""
self.config_tester.run_common_tests()
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def A ( self : List[str] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*lowerCAmelCase__ )
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*lowerCAmelCase__ )
@unittest.skip('SegFormer does not use inputs_embeds' )
def A ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def A ( self : int ):
"""simple docstring"""
pass
def A ( self : int ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = model_class(lowerCAmelCase__ )
UpperCamelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCamelCase = [*signature.parameters.keys()]
UpperCamelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase__ )
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
for model_class in self.all_model_classes:
UpperCamelCase = True
UpperCamelCase = False
UpperCamelCase = True
UpperCamelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
UpperCamelCase = outputs.attentions
UpperCamelCase = sum(self.model_tester.depths )
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
UpperCamelCase = True
UpperCamelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
UpperCamelCase = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# verify the first attentions (first block, first layer)
UpperCamelCase = (self.model_tester.image_size // 4) ** 2
UpperCamelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
UpperCamelCase = (self.model_tester.image_size // 3_2) ** 2
UpperCamelCase = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
UpperCamelCase = len(lowerCAmelCase__ )
# Check attention is always last and order is fine
UpperCamelCase = True
UpperCamelCase = True
UpperCamelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
self.assertEqual(out_len + 1 , len(lowerCAmelCase__ ) )
UpperCamelCase = outputs.attentions
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# verify the first attentions (first block, first layer)
UpperCamelCase = (self.model_tester.image_size // 4) ** 2
UpperCamelCase = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def A ( self : List[str] ):
"""simple docstring"""
def check_hidden_states_output(UpperCamelCase__ : Optional[int] , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Any ):
UpperCamelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.eval()
with torch.no_grad():
UpperCamelCase = model(**self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ ) )
UpperCamelCase = outputs.hidden_states
UpperCamelCase = self.model_tester.num_encoder_blocks
self.assertEqual(len(lowerCAmelCase__ ) , lowerCAmelCase__ )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCamelCase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCamelCase = True
check_hidden_states_output(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
def A ( self : Optional[int] ):
"""simple docstring"""
if not self.model_tester.is_training:
return
UpperCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
UpperCamelCase = True
for model_class in self.all_model_classes:
if model_class in get_values(lowerCAmelCase__ ):
continue
UpperCamelCase = model_class(lowerCAmelCase__ )
model.to(lowerCAmelCase__ )
model.train()
UpperCamelCase = self._prepare_for_class(lowerCAmelCase__ , lowerCAmelCase__ , return_labels=lowerCAmelCase__ )
UpperCamelCase = model(**lowerCAmelCase__ ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def A ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
def A ( self : int ):
"""simple docstring"""
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase = SegformerModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
class SegformerModelIntegrationTest(unittest.TestCase):
"""simple docstring"""
@slow
def A ( self : Dict ):
"""simple docstring"""
UpperCamelCase = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=lowerCAmelCase__ , align=lowerCAmelCase__ , do_random_crop=lowerCAmelCase__ )
UpperCamelCase = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
lowerCAmelCase__ )
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=lowerCAmelCase__ , return_tensors='pt' )
UpperCamelCase = encoded_inputs.pixel_values.to(lowerCAmelCase__ )
with torch.no_grad():
UpperCamelCase = model(lowerCAmelCase__ )
UpperCamelCase = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
UpperCamelCase = torch.tensor(
[
[[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
[[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
[[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCAmelCase__ , atol=1E-4 ) )
@slow
def A ( self : Union[str, Any] ):
"""simple docstring"""
UpperCamelCase = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=lowerCAmelCase__ , align=lowerCAmelCase__ , do_random_crop=lowerCAmelCase__ )
UpperCamelCase = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(lowerCAmelCase__ )
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=lowerCAmelCase__ , return_tensors='pt' )
UpperCamelCase = encoded_inputs.pixel_values.to(lowerCAmelCase__ )
with torch.no_grad():
UpperCamelCase = model(lowerCAmelCase__ )
UpperCamelCase = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase__ )
UpperCamelCase = torch.tensor(
[
[[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
[[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
[[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
] ).to(lowerCAmelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , lowerCAmelCase__ , atol=1E-1 ) )
@slow
def A ( self : List[Any] ):
"""simple docstring"""
UpperCamelCase = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=lowerCAmelCase__ , align=lowerCAmelCase__ , do_random_crop=lowerCAmelCase__ )
UpperCamelCase = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
lowerCAmelCase__ )
UpperCamelCase = prepare_img()
UpperCamelCase = image_processor(images=lowerCAmelCase__ , return_tensors='pt' )
UpperCamelCase = encoded_inputs.pixel_values.to(lowerCAmelCase__ )
with torch.no_grad():
UpperCamelCase = model(lowerCAmelCase__ )
UpperCamelCase = outputs.logits.detach().cpu()
UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__ , target_sizes=[(5_0_0, 3_0_0)] )
UpperCamelCase = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase__ )
UpperCamelCase = image_processor.post_process_semantic_segmentation(outputs=lowerCAmelCase__ )
UpperCamelCase = torch.Size((1_2_8, 1_2_8) )
self.assertEqual(segmentation[0].shape , lowerCAmelCase__ )
| 367 |
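The post-processing exercised by the integration tests above reduces to a few tensor operations: SegFormer emits logits at 1/4 of the input resolution, so a semantic map is obtained by upsampling to the target size and taking a per-pixel argmax (a sketch with stand-in shapes and random values):

import torch
import torch.nn.functional as F

logits = torch.randn(1, 150, 128, 128)  # (batch, num_labels, height / 4, width / 4)
upsampled = F.interpolate(logits, size=(500, 300), mode="bilinear", align_corners=False)
segmentation = upsampled.argmax(dim=1)[0]  # (500, 300) map of per-pixel label ids
print(segmentation.shape)  # torch.Size([500, 300])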
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Tuple = ["ConvNextFeatureExtractor"]
_lowerCamelCase : Optional[Any] = ["ConvNextImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_convnext"] = [
"CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"ConvNextForImageClassification",
"ConvNextModel",
"ConvNextPreTrainedModel",
"ConvNextBackbone",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_convnext"] = [
"TFConvNextForImageClassification",
"TFConvNextModel",
"TFConvNextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
| 249 | 0 |
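The _LazyModule pattern above defers heavy imports until an attribute is first accessed. A minimal standalone version of the same idea can be written with PEP 562's module-level __getattr__ (the package and attribute names below are illustrative, not the transformers implementation):

# lazy_pkg/__init__.py
import importlib

_import_structure = {"ConvNextModel": ".modeling_convnext"}  # attribute -> submodule

def __getattr__(name):
    if name in _import_structure:
        # the submodule is only imported the first time the attribute is requested
        module = importlib.import_module(_import_structure[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")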
import os
from collections.abc import Iterator
def lowerCamelCase__ ( snake_case_ : str = "." ) -> Iterator[str]:
for dir_path, dir_names, filenames in os.walk(snake_case_ ):
__snake_case = [d for d in dir_names if d != '''scripts''' and d[0] not in '''._''']
for filename in filenames:
if filename == "__init__.py":
continue
if os.path.splitext(snake_case_ )[1] in (".py", ".ipynb"):
yield os.path.join(snake_case_ , snake_case_ ).lstrip('''./''' )
def lowerCamelCase__ ( snake_case_ : Tuple ) -> List[str]:
return f"""{i * ' '}*""" if i else "\n##"
def lowerCamelCase__ ( snake_case_ : str , snake_case_ : str ) -> str:
__snake_case = old_path.split(os.sep )
for i, new_part in enumerate(new_path.split(os.sep ) ):
if (i + 1 > len(snake_case_ ) or old_parts[i] != new_part) and new_part:
print(f"""{md_prefix(snake_case_ )} {new_part.replace('_' , ' ' ).title()}""" )
return new_path
def lowerCamelCase__ ( snake_case_ : str = "." ) -> None:
__snake_case = ''''''
for filepath in sorted(good_file_paths(snake_case_ ) ):
__snake_case , __snake_case = os.path.split(snake_case_ )
if filepath != old_path:
__snake_case = print_path(snake_case_ , snake_case_ )
__snake_case = (filepath.count(os.sep ) + 1) if filepath else 0
__snake_case = f"""{filepath}/{filename}""".replace(''' ''' , '''%20''' )
__snake_case = os.path.splitext(filename.replace('''_''' , ''' ''' ).title() )[0]
print(f"""{md_prefix(snake_case_ )} [{filename}]({url})""" )
if __name__ == "__main__":
print_directory_md('.')
| 24 |
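A quick smoke test of the generator over a throwaway tree (the paths are made up; the printed output follows the md_prefix rules above):

import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    os.makedirs(os.path.join(tmp, "sorts"))
    open(os.path.join(tmp, "sorts", "bubble_sort.py"), "w").close()
    os.chdir(tmp)
    print_directory_md(".")
    # prints:
    #
    # ## Sorts
    #   * [Bubble Sort](sorts/bubble_sort.py)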
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312
    sock.connect((host, port))
    sock.send(b"Hello server!")
    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)
print('''Successfully received the file''' )
sock.close()
print('''Connection closed''' )
if __name__ == "__main__":
main()
| 24 | 1 |
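The snippet above is only the receiving half; a sender is assumed to be listening on the same port. A minimal sketch of that counterpart (not part of the original; requires Python 3.8+ for the walrus operator):

import socket

def serve_file(filename: str, port: int = 12312) -> None:
    server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server.bind((socket.gethostname(), port))
    server.listen(1)
    conn, _addr = server.accept()
    conn.recv(1024)  # consume the client's "Hello server!" greeting
    with open(filename, "rb") as in_file:
        while chunk := in_file.read(1024):
            conn.send(chunk)
    conn.close()   # closing signals EOF, which ends the client's recv loop
    server.close()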
'''simple docstring'''
import random
def random_graph(vertices_number: int, probability: float, directed: bool = False) -> dict:
    '''simple docstring'''
    graph = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number)
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i as well
                    graph[j].append(i)
    return graph
def complete_graph(vertices_number: int) -> dict:
    '''simple docstring'''
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }
if __name__ == "__main__":
import doctest
doctest.testmod()
| 369 |
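Usage sketch; the exact adjacency lists depend on the random seed, but for an undirected G(n, p) graph the expected edge count is p * n * (n - 1) / 2:

import random

random.seed(1)
print(random_graph(4, 0.5))   # adjacency dict, seed-dependent
print(complete_graph(3))      # {0: [1, 2], 1: [0, 2], 2: [0, 1]}
n, p = 4, 0.5
print(p * n * (n - 1) / 2)    # 3.0 expected edges in the undirected case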
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
_import_structure = {
'''configuration_trocr''': ['''TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''TrOCRConfig'''],
'''processing_trocr''': ['''TrOCRProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trocr"] = [
'''TROCR_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TrOCRForCausalLM''',
'''TrOCRPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 174 | 0 |
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
_UpperCAmelCase : Tuple = input("""Enter the last number (nth term) of the Harmonic Series""")
print("""Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n""")
print(harmonic_series(nth_term))
| 50 |
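The returned strings are exact fractions, so partial sums can be checked with fractions.Fraction:

from fractions import Fraction

terms = harmonic_series("4")             # ['1', '1/2', '1/3', '1/4']
total = sum(Fraction(t) for t in terms)
print(total, float(total))               # 25/12 2.0833333333333335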
import argparse
import json
import numpy
import torch
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path):
    """simple docstring"""
    chkpt = torch.load(xlm_checkpoint_path, map_location="cpu")
    state_dict = chkpt["model"]
    # We have the base model one level deeper than the original XLM repository
    two_levels_state_dict = {}
    for k, v in state_dict.items():
        if "pred_layer" in k:
            two_levels_state_dict[k] = v
        else:
            two_levels_state_dict["transformer." + k] = v
    config = chkpt["params"]
    config = {n: v for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))}
    vocab = chkpt["dico_word2id"]
    vocab = {s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""): i for s, i in vocab.items()}
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME
    pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"]
    print(f"Save PyTorch model to {pytorch_weights_dump_path}")
    torch.save(two_levels_state_dict, pytorch_weights_dump_path)
    print(f"Save configuration file to {pytorch_config_dump_path}")
    with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(config, indent=2) + "\n")
    print(f"Save vocab file to {pytorch_vocab_dump_path}")
    with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f:
        f.write(json.dumps(vocab, indent=2) + "\n")
if __name__ == "__main__":
__UpperCamelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
__UpperCamelCase : Optional[int] = parser.parse_args()
convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
| 146 | 0 |
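The key-remapping step in isolation, on made-up keys: every weight outside the prediction head gains a "transformer." prefix so it matches the HF module nesting:

state_dict = {"embeddings.weight": 1, "pred_layer.proj.weight": 2}  # illustrative keys
remapped = {k if "pred_layer" in k else "transformer." + k: v for k, v in state_dict.items()}
print(remapped)  # {'transformer.embeddings.weight': 1, 'pred_layer.proj.weight': 2}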
import os
import sys
a_ = os.path.join(os.path.dirname(__file__), 'src')
sys.path.append(SRC_DIR)
from transformers import (
AutoConfig,
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForQuestionAnswering,
AutoModelForSequenceClassification,
AutoTokenizer,
add_start_docstrings,
)
a_ = [
'torch',
'numpy',
'tokenizers',
'filelock',
'requests',
'tqdm',
'regex',
'sentencepiece',
'sacremoses',
'importlib_metadata',
'huggingface_hub',
]
@add_start_docstrings(AutoConfig.__doc__ )
def __lowercase ( *lowerCamelCase : Tuple , **lowerCamelCase : Union[str, Any] ):
return AutoConfig.from_pretrained(*lowerCamelCase , **lowerCamelCase )
@add_start_docstrings(AutoTokenizer.__doc__ )
def __lowercase ( *lowerCamelCase : Dict , **lowerCamelCase : List[str] ):
return AutoTokenizer.from_pretrained(*lowerCamelCase , **lowerCamelCase )
@add_start_docstrings(AutoModel.__doc__ )
def __lowercase ( *lowerCamelCase : int , **lowerCamelCase : List[str] ):
return AutoModel.from_pretrained(*lowerCamelCase , **lowerCamelCase )
@add_start_docstrings(AutoModelForCausalLM.__doc__ )
def __lowercase ( *lowerCamelCase : Optional[int] , **lowerCamelCase : int ):
return AutoModelForCausalLM.from_pretrained(*lowerCamelCase , **lowerCamelCase )
@add_start_docstrings(AutoModelForMaskedLM.__doc__ )
def __lowercase ( *lowerCamelCase : Any , **lowerCamelCase : Union[str, Any] ):
return AutoModelForMaskedLM.from_pretrained(*lowerCamelCase , **lowerCamelCase )
@add_start_docstrings(AutoModelForSequenceClassification.__doc__ )
def __lowercase ( *lowerCamelCase : Optional[int] , **lowerCamelCase : Optional[Any] ):
return AutoModelForSequenceClassification.from_pretrained(*lowerCamelCase , **lowerCamelCase )
@add_start_docstrings(AutoModelForQuestionAnswering.__doc__ )
def __lowercase ( *lowerCamelCase : Optional[int] , **lowerCamelCase : Any ):
return AutoModelForQuestionAnswering.from_pretrained(*lowerCamelCase , **lowerCamelCase )
| 50 |
def max_product_subarray(numbers: list[int]) -> int:
    if not numbers:
        return 0
    if not isinstance(numbers, (list, tuple)) or not all(
        isinstance(number, int) for number in numbers
    ):
        raise ValueError("numbers must be an iterable of integers")
    max_till_now = min_till_now = max_prod = numbers[0]
    for i in range(1, len(numbers)):
        # update the maximum and minimum subarray products
        number = numbers[i]
        if number < 0:
            max_till_now, min_till_now = min_till_now, max_till_now
        max_till_now = max(number, max_till_now * number)
        min_till_now = min(number, min_till_now * number)
        # update the maximum product found till now
        max_prod = max(max_prod, max_till_now)
    return max_prod
| 50 | 1 |
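Worked examples; tracking both running products is what lets a later negative number flip a large negative minimum into the maximum:

print(max_product_subarray([2, 3, -2, 4]))  # 6, from the subarray [2, 3]
print(max_product_subarray([-2, 0, -1]))    # 0, the zero splits the array
print(max_product_subarray([-4, -3]))       # 12, two negatives multiply to a positive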
def binary_and(a: int, b: int) -> str:
    '''simple docstring'''
    if a < 0 or b < 0:
        raise ValueError("""the value of both inputs must be positive""")
    a_binary = str(bin(a))[2:]  # remove the leading "0b"
    b_binary = str(bin(b))[2:]  # remove the leading "0b"
    max_len = max(len(a_binary), len(b_binary))
    return "0b" + "".join(
        str(int(char_a == """1""" and char_b == """1"""))
        for char_a, char_b in zip(a_binary.zfill(max_len), b_binary.zfill(max_len)))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 88 |
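Usage: both inputs are zero-filled to a common width, so the result always carries that many digits:

print(binary_and(25, 32))  # '0b000000'  (0b011001 & 0b100000 share no set bits)
print(binary_and(25, 37))  # '0b000001'  (0b011001 & 0b100101 -> only the last bit)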
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'configuration_encodec': [
'ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP',
'EncodecConfig',
],
'feature_extraction_encodec': ['EncodecFeatureExtractor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encodec"] = [
'ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST',
'EncodecModel',
'EncodecPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 292 | 0 |
"""simple docstring"""
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class UpperCamelCase__ :
"""simple docstring"""
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, embedding_size=16, hidden_size=36, num_hidden_layers=6, num_hidden_groups=6, num_attention_heads=6, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ):
lowerCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase_ : str = None
if self.use_input_mask:
lowerCAmelCase_ : int = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase_ : List[Any] = None
if self.use_token_type_ids:
lowerCAmelCase_ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase_ : List[str] = None
lowerCAmelCase_ : Union[str, Any] = None
lowerCAmelCase_ : str = None
if self.use_labels:
lowerCAmelCase_ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase_ : Dict = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase_ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : str ):
lowerCAmelCase_ : Union[str, Any] = AlbertModel(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : List[str] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = model(SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Tuple ):
lowerCAmelCase_ : Optional[Any] = AlbertForPreTraining(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Optional[Any] = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , sentence_order_label=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Tuple , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ):
lowerCAmelCase_ : str = AlbertForMaskedLM(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : int , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Any] ):
lowerCAmelCase_ : List[str] = AlbertForQuestionAnswering(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Any = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , start_positions=SCREAMING_SNAKE_CASE_ , end_positions=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : List[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict ):
lowerCAmelCase_ : Union[str, Any] = self.num_labels
lowerCAmelCase_ : Union[str, Any] = AlbertForSequenceClassification(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Union[str, Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : List[str] ):
lowerCAmelCase_ : List[str] = self.num_labels
lowerCAmelCase_ : List[Any] = AlbertForTokenClassification(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : str = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Union[str, Any] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : List[str] ):
lowerCAmelCase_ : Optional[Any] = self.num_choices
lowerCAmelCase_ : int = AlbertForMultipleChoice(config=SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase_ : Optional[int] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase_ : List[Any] = model(
SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ , token_type_ids=SCREAMING_SNAKE_CASE_ , labels=SCREAMING_SNAKE_CASE_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowerCAmelCase_ : Optional[int] = self.prepare_config_and_inputs()
(
lowerCAmelCase_
) : Optional[int] = config_and_inputs
lowerCAmelCase_ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase_, lowercase_, unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE = True
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : Dict , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str=False ):
lowerCAmelCase_ : List[str] = super()._prepare_for_class(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , return_labels=SCREAMING_SNAKE_CASE_ )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ : List[str] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=SCREAMING_SNAKE_CASE_ )
return inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : str = AlbertModelTester(self )
lowerCAmelCase_ : Optional[int] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE_ , hidden_size=3_7 )
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : str ):
lowerCAmelCase_ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*SCREAMING_SNAKE_CASE_ )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
lowerCAmelCase_ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
lowerCAmelCase_ : int = type
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase_ : Optional[Any] = AlbertModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
lowerCAmelCase_ : Any = AlbertModel.from_pretrained('albert-base-v2' )
lowerCAmelCase_ : Tuple = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
lowerCAmelCase_ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
lowerCAmelCase_ : List[Any] = model(SCREAMING_SNAKE_CASE_ , attention_mask=SCREAMING_SNAKE_CASE_ )[0]
lowerCAmelCase_ : str = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : int = torch.tensor(
[[[-0.65_13, 1.50_35, -0.27_66], [-0.65_15, 1.50_46, -0.27_80], [-0.65_12, 1.50_49, -0.27_84]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , SCREAMING_SNAKE_CASE_ , atol=1E-4 ) )
| 358 |
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
"""simple docstring"""
    def __init__(self):
        # test for the above condition
        self.test()
    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.')
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10000:
                raise Exception('update() does not fulfill the constraint.')
        if self.remaining() != 0:
            raise Exception('Custom Constraint is not defined correctly.')
    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")
    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")
    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")
    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")
    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")
    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            F"{self.__class__} is an abstract class. Only classes inheriting this class can be called.")
class PhrasalConstraint(Constraint):
"""simple docstring"""
    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(F"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(F"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")
        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False
    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]
    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]
    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(F"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")
        stepped = False
        completed = False
        reset = False
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
                self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset
    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0
    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)
    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[List[int]] , SCREAMING_SNAKE_CASE_ : Union[str, Any]=True ):
lowerCAmelCase_ : Tuple = max([len(SCREAMING_SNAKE_CASE_ ) for one in nested_token_ids] )
lowerCAmelCase_ : Optional[Any] = {}
for token_ids in nested_token_ids:
lowerCAmelCase_ : Union[str, Any] = root
for tidx, token_id in enumerate(SCREAMING_SNAKE_CASE_ ):
if token_id not in level:
lowerCAmelCase_ : Dict = {}
lowerCAmelCase_ : Tuple = level[token_id]
if no_subsets and self.has_subsets(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F" {nested_token_ids}." )
lowerCAmelCase_ : Union[str, Any] = root
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Any ):
lowerCAmelCase_ : str = self.trie
for current_token in current_seq:
lowerCAmelCase_ : Optional[int] = start[current_token]
lowerCAmelCase_ : Dict = list(start.keys() )
return next_tokens
def SCREAMING_SNAKE_CASE__ ( self : str , SCREAMING_SNAKE_CASE_ : Any ):
lowerCAmelCase_ : Any = self.next_tokens(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) == 0
def SCREAMING_SNAKE_CASE__ ( self : int , SCREAMING_SNAKE_CASE_ : int ):
lowerCAmelCase_ : Tuple = list(root.values() )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return 1
else:
return sum([self.count_leaves(SCREAMING_SNAKE_CASE_ ) for nn in next_nodes] )
def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] , SCREAMING_SNAKE_CASE_ : str , SCREAMING_SNAKE_CASE_ : Optional[Any] ):
lowerCAmelCase_ : Any = self.count_leaves(SCREAMING_SNAKE_CASE_ )
return len(SCREAMING_SNAKE_CASE_ ) != leaf_count
class UpperCamelCase__ ( lowercase_ ):
"""simple docstring"""
def __init__( self : List[Any] , SCREAMING_SNAKE_CASE_ : List[List[int]] ):
super(SCREAMING_SNAKE_CASE_ , self ).__init__()
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or len(SCREAMING_SNAKE_CASE_ ) == 0:
raise ValueError(F"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}." )
if any(not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for token_ids in nested_token_ids ):
raise ValueError(F"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}." )
if any(
any((not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}." )
lowerCAmelCase_ : Dict = DisjunctiveTrie(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Optional[int] = nested_token_ids
lowerCAmelCase_ : Tuple = self.trie.max_height
lowerCAmelCase_ : Union[str, Any] = []
lowerCAmelCase_ : Optional[Any] = False
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowerCAmelCase_ : int = self.trie.next_tokens(self.current_seq )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : int ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(SCREAMING_SNAKE_CASE_ )}" )
lowerCAmelCase_ : Optional[int] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def SCREAMING_SNAKE_CASE__ ( self : Any , SCREAMING_SNAKE_CASE_ : int ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(F"`token_id` is supposed to be type `int`, but is {token_id} of type {type(SCREAMING_SNAKE_CASE_ )}" )
lowerCAmelCase_ : int = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : Optional[Any] = False
if self.does_advance(SCREAMING_SNAKE_CASE_ ):
self.current_seq.append(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = True
else:
lowerCAmelCase_ : List[str] = True
self.reset()
lowerCAmelCase_ : Dict = self.trie.reached_leaf(self.current_seq )
lowerCAmelCase_ : List[str] = completed
return stepped, completed, reset
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : int = False
lowerCAmelCase_ : Optional[Any] = []
def SCREAMING_SNAKE_CASE__ ( self : str ):
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def SCREAMING_SNAKE_CASE__ ( self : Tuple , SCREAMING_SNAKE_CASE_ : Dict=False ):
lowerCAmelCase_ : Dict = DisjunctiveConstraint(self.token_ids )
if stateful:
lowerCAmelCase_ : Dict = self.seqlen
lowerCAmelCase_ : Optional[Any] = self.current_seq
lowerCAmelCase_ : List[Any] = self.completed
return new_constraint
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self : Optional[int] , SCREAMING_SNAKE_CASE_ : List[Constraint] ):
lowerCAmelCase_ : Optional[int] = constraints
# max # of steps required to fulfill a given constraint
lowerCAmelCase_ : Optional[int] = max([c.seqlen for c in constraints] )
lowerCAmelCase_ : List[str] = len(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[str] = False
self.init_state()
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
lowerCAmelCase_ : Tuple = []
lowerCAmelCase_ : int = None
lowerCAmelCase_ : Optional[int] = [constraint.copy(stateful=SCREAMING_SNAKE_CASE_ ) for constraint in self.constraints]
def SCREAMING_SNAKE_CASE__ ( self : int ):
lowerCAmelCase_ : Optional[Any] = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
lowerCAmelCase_ : List[str] = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
lowerCAmelCase_ : List[Any] = constraint.advance()
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
token_list.append(SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
token_list.extend(SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase_ : Dict = self.inprogress_constraint.advance()
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
token_list.append(SCREAMING_SNAKE_CASE_ )
elif isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
token_list.extend(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) == 0:
return None
else:
return token_list
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[List[int]] ):
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
lowerCAmelCase_ ,lowerCAmelCase_ : Union[str, Any] = self.add(SCREAMING_SNAKE_CASE_ )
                # the entire list of constraints is fulfilled
if self.completed:
break
def SCREAMING_SNAKE_CASE__ ( self : Dict , SCREAMING_SNAKE_CASE_ : int ):
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError(F"`token_id` should be an `int`, but is `{token_id}`." )
lowerCAmelCase_ ,lowerCAmelCase_ : Tuple = False, False
if self.completed:
lowerCAmelCase_ : Any = True
lowerCAmelCase_ : Dict = False
return complete, stepped
if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the current
            # job, simply update the state
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : Any = self.inprogress_constraint.update(SCREAMING_SNAKE_CASE_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                # But that doesn't mean we call self.init_state(), since we only reset the state for this particular
                # constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase_ : Optional[int] = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
lowerCAmelCase_ : str = None
if len(self.pending_constraints ) == 0:
# we're done!
lowerCAmelCase_ : Union[str, Any] = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
            # of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase_ ,lowerCAmelCase_ ,lowerCAmelCase_ : str = pending_constraint.update(SCREAMING_SNAKE_CASE_ )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : Any = None
if not complete and stepped:
lowerCAmelCase_ : Optional[Any] = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
lowerCAmelCase_ : Optional[int] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
lowerCAmelCase_ : Optional[Any] = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , SCREAMING_SNAKE_CASE_ : str=True ):
        lowerCAmelCase_ : List[Any] = ConstraintListState(self.constraints )  # we never actually touch the self.constraints objects
        # throughout this process. So the copy starts at initialization state.
if stateful:
lowerCAmelCase_ : Any = [
constraint.copy(stateful=SCREAMING_SNAKE_CASE_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
lowerCAmelCase_ : List[str] = self.inprogress_constraint.copy(stateful=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase_ : List[Any] = [constraint.copy() for constraint in self.pending_constraints]
return new_state
| 289 | 0 |
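The sample above implements beam-search decoding constraints: a phrasal constraint walks through a fixed token sequence one id at a time and resets on any mismatch. Below is a minimal sketch of that tracking logic; the class and method names are hypothetical stand-ins, not the library's API.

```python
# Minimal sketch of phrasal-constraint tracking (hypothetical names): advance
# through a fixed token sequence one id at a time, resetting on any mismatch.
class MiniPhrasalConstraint:
    def __init__(self, token_ids):
        self.token_ids = list(token_ids)
        self.fulfilled_idx = -1  # index of the last matched token
        self.completed = False

    def advance(self):
        # next token id needed, or None once the phrase is complete
        return None if self.completed else self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        # returns (stepped, completed, reset), mirroring the update() above
        if token_id == self.advance():
            self.fulfilled_idx += 1
            self.completed = self.fulfilled_idx == len(self.token_ids) - 1
            return True, self.completed, False
        self.fulfilled_idx, self.completed = -1, False  # failed: start over
        return False, False, True

c = MiniPhrasalConstraint([5, 6, 7])
assert c.update(5) == (True, False, False)
assert c.update(9) == (False, False, True)  # mismatch resets the progress
```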
from __future__ import annotations
import typing
from collections import Counter
def lowerCamelCase__ ( __lowerCAmelCase : int ):
"""simple docstring"""
lowerCAmelCase_ = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(__lowerCAmelCase , max_perimeter + 1 ):
lowerCAmelCase_ = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(__lowerCAmelCase ):
lowerCAmelCase_ = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def lowerCamelCase__ ( __lowerCAmelCase : int = 1000 ):
"""simple docstring"""
lowerCAmelCase_ = pythagorean_triple(__lowerCAmelCase )
return triplets.most_common(1 )[0][0]
if __name__ == "__main__":
print(f"""Perimeter {solution()} has maximum solutions""")
| 231 |
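The sample above brute-forces Project Euler 39: among perimeters up to 1000, find the one admitting the most integer-sided right triangles. A compact equivalent sketch, assuming the standard problem statement:

```python
# Count integer right triangles per perimeter and return the perimeter with
# the most solutions (b >= a avoids counting mirrored triangles twice).
from collections import Counter

def best_perimeter(limit: int = 1000) -> int:
    counts = Counter()
    for a in range(1, limit):
        for b in range(a, limit):
            c_sq = a * a + b * b
            c = int(c_sq**0.5)
            if c * c == c_sq and a + b + c <= limit:
                counts[a + b + c] += 1
    return counts.most_common(1)[0][0]

print(best_perimeter())  # 840 for the 1000 limit
```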
from __future__ import annotations
import csv
import requests
from bsa import BeautifulSoup
def lowerCamelCase__ ( __lowerCAmelCase : str = "" ):
"""simple docstring"""
lowerCAmelCase_ = url or "https://www.imdb.com/chart/top/?ref_=nv_mv_250"
lowerCAmelCase_ = BeautifulSoup(requests.get(__lowerCAmelCase ).text , "html.parser" )
lowerCAmelCase_ = soup.find_all("td" , attrs="titleColumn" )
lowerCAmelCase_ = soup.find_all("td" , class_="ratingColumn imdbRating" )
return {
title.a.text: float(rating.strong.text )
for title, rating in zip(__lowerCAmelCase , __lowerCAmelCase )
}
def lowerCamelCase__ ( __lowerCAmelCase : str = "IMDb_Top_250_Movies.csv" ):
"""simple docstring"""
lowerCAmelCase_ = get_imdb_top_aaa_movies()
with open(__lowerCAmelCase , "w" , newline="" ) as out_file:
lowerCAmelCase_ = csv.writer(__lowerCAmelCase )
writer.writerow(["Movie title", "IMDb rating"] )
for title, rating in movies.items():
writer.writerow([title, rating] )
if __name__ == "__main__":
write_movies()
| 231 | 1 |
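The sample above scrapes a ratings table into a CSV file (its `bsa` import appears to stand in for `bs4`). A generic sketch of the same scrape-to-CSV pattern; the URL handling and selectors below are placeholders, not IMDb's actual markup:

```python
# Generic scrape-to-CSV sketch: fetch a page, pull (text, link) pairs out of
# the parsed HTML, and write them with the csv module.
import csv

import requests
from bs4 import BeautifulSoup

def scrape_to_csv(url: str, out_path: str) -> None:
    soup = BeautifulSoup(requests.get(url, timeout=10).text, "html.parser")
    rows = [(a.text.strip(), a["href"]) for a in soup.find_all("a", href=True)]
    with open(out_path, "w", newline="") as out_file:
        writer = csv.writer(out_file)
        writer.writerow(["text", "href"])
        writer.writerows(rows)
```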
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class UpperCamelCase ( unittest.TestCase ):
"""simple docstring"""
def __init__( self ,UpperCAmelCase_ ,UpperCAmelCase_=7 ,UpperCAmelCase_=3 ,UpperCAmelCase_=18 ,UpperCAmelCase_=30 ,UpperCAmelCase_=4_00 ,UpperCAmelCase_=True ,UpperCAmelCase_=None ,UpperCAmelCase_=True ,UpperCAmelCase_=None ,UpperCAmelCase_=True ,UpperCAmelCase_=[0.5, 0.5, 0.5] ,UpperCAmelCase_=[0.5, 0.5, 0.5] ,):
_lowercase : Tuple = size if size is not None else {"""shortest_edge""": 18}
_lowercase : int = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_lowercase : Optional[Any] = parent
_lowercase : Tuple = batch_size
_lowercase : Optional[int] = num_channels
_lowercase : Tuple = image_size
_lowercase : Dict = min_resolution
_lowercase : int = max_resolution
_lowercase : Optional[int] = do_resize
_lowercase : Any = size
_lowercase : str = do_center_crop
_lowercase : Optional[Any] = crop_size
_lowercase : Union[str, Any] = do_normalize
_lowercase : Union[str, Any] = image_mean
_lowercase : Tuple = image_std
def lowerCamelCase__ ( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class UpperCamelCase ( snake_case , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = LevitImageProcessor if is_vision_available() else None
def lowerCamelCase__ ( self ):
_lowercase : Tuple = LevitImageProcessingTester(self )
@property
def lowerCamelCase__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase__ ( self ):
_lowercase : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(UpperCAmelCase_ ,"""image_mean""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,"""image_std""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_normalize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_resize""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,"""do_center_crop""" ) )
self.assertTrue(hasattr(UpperCAmelCase_ ,"""size""" ) )
def lowerCamelCase__ ( self ):
_lowercase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
_lowercase : Any = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
def lowerCamelCase__ ( self ):
pass
def lowerCamelCase__ ( self ):
# Initialize image_processing
_lowercase : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_lowercase : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ ,Image.Image )
# Test not batched input
_lowercase : str = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
_lowercase : Optional[int] = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def lowerCamelCase__ ( self ):
# Initialize image_processing
_lowercase : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_lowercase : str = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCAmelCase_ ,numpify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ ,np.ndarray )
# Test not batched input
_lowercase : List[Any] = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
_lowercase : Optional[Any] = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def lowerCamelCase__ ( self ):
# Initialize image_processing
_lowercase : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_lowercase : List[Any] = prepare_image_inputs(self.image_processor_tester ,equal_resolution=UpperCAmelCase_ ,torchify=UpperCAmelCase_ )
for image in image_inputs:
self.assertIsInstance(UpperCAmelCase_ ,torch.Tensor )
# Test not batched input
_lowercase : str = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
_lowercase : int = image_processing(UpperCAmelCase_ ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
| 336 |
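The test file above repeatedly asserts that processed images come out at the configured crop size, for PIL, numpy, and torch inputs alike. A self-contained sketch of the underlying shape check for a center crop:

```python
# After a resize-then-center-crop pipeline every image should have shape
# (C, crop_h, crop_w); this reproduces that check on a dummy batch.
import torch

batch = torch.rand(4, 3, 30, 40)  # (N, C, H, W) dummy images
crop_h = crop_w = 18
top = (batch.shape[2] - crop_h) // 2
left = (batch.shape[3] - crop_w) // 2
cropped = batch[:, :, top : top + crop_h, left : left + crop_w]
assert cropped.shape == (4, 3, crop_h, crop_w)
```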
"""simple docstring"""
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 336 | 1 |
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'microsoft/unispeech-large-1500h-cv': (
'https://huggingface.co/microsoft/unispeech-large-1500h-cv/resolve/main/config.json'
),
# See all UniSpeech models at https://huggingface.co/models?filter=unispeech
}
class UpperCAmelCase ( _lowerCamelCase ):
A__ : Dict = "unispeech"
def __init__(self : int , snake_case__ : Any=32 , snake_case__ : Optional[int]=7_68 , snake_case__ : Union[str, Any]=12 , snake_case__ : Optional[int]=12 , snake_case__ : List[Any]=30_72 , snake_case__ : Tuple="gelu" , snake_case__ : Tuple=0.1 , snake_case__ : Optional[Any]=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : List[str]=0.0 , snake_case__ : List[str]=0.0 , snake_case__ : Tuple=0.1 , snake_case__ : Tuple=0.1 , snake_case__ : List[str]=0.02 , snake_case__ : str=1e-5 , snake_case__ : Optional[int]="group" , snake_case__ : Union[str, Any]="gelu" , snake_case__ : List[Any]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , snake_case__ : List[str]=(5, 2, 2, 2, 2, 2, 2) , snake_case__ : Dict=(10, 3, 3, 3, 3, 2, 2) , snake_case__ : Optional[Any]=False , snake_case__ : Optional[Any]=1_28 , snake_case__ : Any=16 , snake_case__ : Optional[int]=False , snake_case__ : List[str]=True , snake_case__ : Optional[int]=0.05 , snake_case__ : Dict=10 , snake_case__ : int=2 , snake_case__ : Optional[int]=0.0 , snake_case__ : Optional[Any]=10 , snake_case__ : Dict=0 , snake_case__ : str=3_20 , snake_case__ : Dict=2 , snake_case__ : Tuple=0.1 , snake_case__ : Tuple=1_00 , snake_case__ : Tuple=2_56 , snake_case__ : str=2_56 , snake_case__ : Union[str, Any]=0.1 , snake_case__ : Any="mean" , snake_case__ : Union[str, Any]=False , snake_case__ : Tuple=False , snake_case__ : Tuple=2_56 , snake_case__ : Tuple=80 , snake_case__ : int=0 , snake_case__ : List[str]=1 , snake_case__ : Optional[int]=2 , snake_case__ : Optional[Any]=0.5 , **snake_case__ : Dict , ) -> Tuple:
'''simple docstring'''
super().__init__(**lowercase_ , pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ )
snake_case : int = hidden_size
snake_case : int = feat_extract_norm
snake_case : Optional[int] = feat_extract_activation
snake_case : str = list(lowercase_ )
snake_case : Any = list(lowercase_ )
snake_case : Optional[Any] = list(lowercase_ )
snake_case : str = conv_bias
snake_case : Optional[Any] = num_conv_pos_embeddings
snake_case : List[str] = num_conv_pos_embedding_groups
snake_case : Tuple = len(self.conv_dim )
snake_case : Any = num_hidden_layers
snake_case : List[str] = intermediate_size
snake_case : Optional[int] = hidden_act
snake_case : Tuple = num_attention_heads
snake_case : Any = hidden_dropout
snake_case : Dict = attention_dropout
snake_case : int = activation_dropout
snake_case : Dict = feat_proj_dropout
snake_case : Optional[int] = final_dropout
snake_case : Any = layerdrop
snake_case : List[Any] = layer_norm_eps
snake_case : int = initializer_range
snake_case : List[str] = num_ctc_classes
snake_case : Tuple = vocab_size
snake_case : Dict = do_stable_layer_norm
snake_case : List[str] = use_weighted_layer_sum
snake_case : Dict = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
snake_case : Optional[int] = apply_spec_augment
snake_case : Tuple = mask_time_prob
snake_case : Optional[int] = mask_time_length
snake_case : Optional[int] = mask_time_min_masks
snake_case : List[str] = mask_feature_prob
snake_case : int = mask_feature_length
snake_case : Optional[int] = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
snake_case : Tuple = num_codevectors_per_group
snake_case : List[Any] = num_codevector_groups
snake_case : List[str] = contrastive_logits_temperature
snake_case : int = feat_quantizer_dropout
snake_case : Optional[int] = num_negatives
snake_case : Optional[Any] = codevector_dim
snake_case : int = proj_codevector_dim
snake_case : Optional[Any] = diversity_loss_weight
# ctc loss
snake_case : int = ctc_loss_reduction
snake_case : int = ctc_zero_infinity
# pretraining loss
snake_case : Optional[int] = replace_prob
@property
def _SCREAMING_SNAKE_CASE (self : str ) -> Optional[Any]:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 59 |
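The config above ends with a property that reduces the convolutional strides by multiplication; the product is the feature extractor's overall waveform-to-frame downsampling factor. For the default strides:

```python
# The final property above computes functools.reduce(operator.mul, strides, 1);
# for the default UniSpeech strides that is a 320x downsampling.
import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)
factor = functools.reduce(operator.mul, conv_stride, 1)
assert factor == 320  # at 16 kHz, one feature frame every 20 ms
```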
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class a ( _lowerCamelCase ):
snake_case_ = 42
@flax_register_to_config
class a ( nn.Module , _lowerCamelCase , _lowerCamelCase ):
snake_case_ = 32
snake_case_ = 4
snake_case_ = 4
snake_case_ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
snake_case_ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
snake_case_ = False
snake_case_ = (320, 640, 1_280, 1_280)
snake_case_ = 2
snake_case_ = 8
snake_case_ = None
snake_case_ = 1_280
snake_case_ = 0.0
snake_case_ = False
snake_case_ = jnp.floataa
snake_case_ = True
snake_case_ = 0
snake_case_ = False
def A_ ( self : Optional[int] , lowercase_ : jax.random.KeyArray ):
# init input tensors
snake_case_ = (1, self.in_channels, self.sample_size, self.sample_size)
snake_case_ = jnp.zeros(lowercase_ , dtype=jnp.floataa )
snake_case_ = jnp.ones((1,) , dtype=jnp.intaa )
snake_case_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
snake_case_ ,snake_case_ = jax.random.split(lowercase_ )
snake_case_ = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(lowercase_ , lowercase_ , lowercase_ , lowercase_ )["params"]
def A_ ( self : List[str] ):
snake_case_ = self.block_out_channels
snake_case_ = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'''At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.''' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
snake_case_ = self.num_attention_heads or self.attention_head_dim
# input
snake_case_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
snake_case_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
snake_case_ = FlaxTimestepEmbedding(lowercase_ , dtype=self.dtype )
snake_case_ = self.only_cross_attention
if isinstance(lowercase_ , lowercase_ ):
snake_case_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowercase_ , lowercase_ ):
snake_case_ = (num_attention_heads,) * len(self.down_block_types )
# down
snake_case_ = []
snake_case_ = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
snake_case_ = output_channel
snake_case_ = block_out_channels[i]
snake_case_ = i == len(lowercase_ ) - 1
if down_block_type == "CrossAttnDownBlock2D":
snake_case_ = FlaxCrossAttnDownBlockaD(
in_channels=lowercase_ , out_channels=lowercase_ , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
snake_case_ = FlaxDownBlockaD(
in_channels=lowercase_ , out_channels=lowercase_ , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowercase_ )
snake_case_ = down_blocks
# mid
snake_case_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
snake_case_ = []
snake_case_ = list(reversed(lowercase_ ) )
snake_case_ = list(reversed(lowercase_ ) )
snake_case_ = list(reversed(lowercase_ ) )
snake_case_ = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
snake_case_ = output_channel
snake_case_ = reversed_block_out_channels[i]
snake_case_ = reversed_block_out_channels[min(i + 1 , len(lowercase_ ) - 1 )]
snake_case_ = i == len(lowercase_ ) - 1
if up_block_type == "CrossAttnUpBlock2D":
snake_case_ = FlaxCrossAttnUpBlockaD(
in_channels=lowercase_ , out_channels=lowercase_ , prev_output_channel=lowercase_ , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
snake_case_ = FlaxUpBlockaD(
in_channels=lowercase_ , out_channels=lowercase_ , prev_output_channel=lowercase_ , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(lowercase_ )
snake_case_ = output_channel
snake_case_ = up_blocks
# out
snake_case_ = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
snake_case_ = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Union[str, Any] , lowercase_ : Union[str, Any] , lowercase_ : int , lowercase_ : Any , lowercase_ : int=None , lowercase_ : Any=None , lowercase_ : bool = True , lowercase_ : bool = False , ):
# 1. time
if not isinstance(lowercase_ , jnp.ndarray ):
snake_case_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowercase_ , jnp.ndarray ) and len(timesteps.shape ) == 0:
snake_case_ = timesteps.astype(dtype=jnp.floataa )
snake_case_ = jnp.expand_dims(lowercase_ , 0 )
snake_case_ = self.time_proj(lowercase_ )
snake_case_ = self.time_embedding(lowercase_ )
# 2. pre-process
snake_case_ = jnp.transpose(lowercase_ , (0, 2, 3, 1) )
snake_case_ = self.conv_in(lowercase_ )
# 3. down
snake_case_ = (sample,)
for down_block in self.down_blocks:
if isinstance(lowercase_ , lowercase_ ):
snake_case_ ,snake_case_ = down_block(lowercase_ , lowercase_ , lowercase_ , deterministic=not train )
else:
snake_case_ ,snake_case_ = down_block(lowercase_ , lowercase_ , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
snake_case_ = ()
for down_block_res_sample, down_block_additional_residual in zip(
lowercase_ , lowercase_ ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
snake_case_ = new_down_block_res_samples
# 4. mid
snake_case_ = self.mid_block(lowercase_ , lowercase_ , lowercase_ , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
snake_case_ = down_block_res_samples[-(self.layers_per_block + 1) :]
snake_case_ = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(lowercase_ , lowercase_ ):
snake_case_ = up_block(
lowercase_ , temb=lowercase_ , encoder_hidden_states=lowercase_ , res_hidden_states_tuple=lowercase_ , deterministic=not train , )
else:
snake_case_ = up_block(lowercase_ , temb=lowercase_ , res_hidden_states_tuple=lowercase_ , deterministic=not train )
# 6. post-process
snake_case_ = self.conv_norm_out(lowercase_ )
snake_case_ = nn.silu(lowercase_ )
snake_case_ = self.conv_out(lowercase_ )
snake_case_ = jnp.transpose(lowercase_ , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=lowercase_ )
| 56 | 0 |
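The UNet forward pass above transposes channels-first inputs to channels-last before the Flax convolutions and back again at the end. The layout round-trip, shown with numpy so the snippet runs without JAX installed:

```python
# Channels-first <-> channels-last round trip, as in the __call__ above.
import numpy as np

sample = np.zeros((1, 4, 32, 32))            # (N, C, H, W) latent input
nhwc = np.transpose(sample, (0, 2, 3, 1))    # -> (N, H, W, C) for Flax convs
restored = np.transpose(nhwc, (0, 3, 1, 2))  # -> back to (N, C, H, W)
assert restored.shape == sample.shape
```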
import os
def SCREAMING_SNAKE_CASE ( ) -> str:
with open(os.path.dirname(__UpperCamelCase ) + '''/grid.txt''' ) as f:
UpperCAmelCase_ = [] # noqa: E741
for _ in range(20 ):
l.append([int(__UpperCamelCase ) for x in f.readline().split()] )
UpperCAmelCase_ = 0
# right
for i in range(20 ):
for j in range(17 ):
UpperCAmelCase_ = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
if temp > maximum:
UpperCAmelCase_ = temp
# down
for i in range(17 ):
for j in range(20 ):
UpperCAmelCase_ = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
if temp > maximum:
UpperCAmelCase_ = temp
# diagonal 1
for i in range(17 ):
for j in range(17 ):
UpperCAmelCase_ = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
if temp > maximum:
UpperCAmelCase_ = temp
# diagonal 2
for i in range(17 ):
for j in range(3 , 20 ):
UpperCAmelCase_ = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
if temp > maximum:
UpperCAmelCase_ = temp
return maximum
if __name__ == "__main__":
print(solution())
| 177 |
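The sample above scans a 20x20 grid in four directions (right, down, and both diagonals) for the largest product of four adjacent numbers, which is Project Euler 11. The same search written with direction vectors, run on a small random grid since the puzzle's grid.txt is not included here:

```python
# Direction-vector version of the four-in-a-row product search above.
import math
import random

n, k = 6, 4
grid = [[random.randint(0, 99) for _ in range(n)] for _ in range(n)]
best = 0
for di, dj in ((0, 1), (1, 0), (1, 1), (1, -1)):  # right, down, diag, anti-diag
    for i in range(n):
        for j in range(n):
            if 0 <= i + (k - 1) * di < n and 0 <= j + (k - 1) * dj < n:
                best = max(best, math.prod(grid[i + s * di][j + s * dj] for s in range(k)))
print(best)
```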
from typing import Dict, Iterable, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
_lowerCamelCase = logging.get_logger(__name__)
class a ( _A ):
'''simple docstring'''
lowerCAmelCase : List[Any] = ['pixel_values']
def __init__( self : Any , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : bool = True , __snake_case : Dict[str, int] = None , __snake_case : bool = True , __snake_case : Union[int, float] = 1 / 2_55 , __snake_case : bool = True , __snake_case : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_MEAN , __snake_case : Optional[Union[float, Iterable[float]]] = IMAGENET_DEFAULT_STD , **__snake_case : Optional[Any] , ):
super().__init__(**__snake_case )
UpperCAmelCase_ = size if size is not None else {'''shortest_edge''': 2_24}
UpperCAmelCase_ = get_size_dict(__snake_case , default_to_square=__snake_case )
UpperCAmelCase_ = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
UpperCAmelCase_ = get_size_dict(__snake_case , param_name='''crop_size''' )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowerCamelCase_ ( self : int , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : PILImageResampling = PILImageResampling.BICUBIC , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Tuple , ):
UpperCAmelCase_ = get_size_dict(__snake_case , default_to_square=__snake_case )
# size_dict is a dict with either keys "height" and "width" or "shortest_edge"
if "shortest_edge" in size:
UpperCAmelCase_ = int((2_56 / 2_24) * size['''shortest_edge'''] )
UpperCAmelCase_ = get_resize_output_image_size(__snake_case , size=__snake_case , default_to_square=__snake_case )
UpperCAmelCase_ = {'''height''': output_size[0], '''width''': output_size[1]}
if "height" not in size_dict or "width" not in size_dict:
raise ValueError(
F'Size dict must have keys \'height\' and \'width\' or \'shortest_edge\'. Got {size_dict.keys()}' )
return resize(
__snake_case , size=(size_dict['''height'''], size_dict['''width''']) , resample=__snake_case , data_format=__snake_case , **__snake_case )
def lowerCamelCase_ ( self : Tuple , __snake_case : np.ndarray , __snake_case : Dict[str, int] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Optional[int] , ):
UpperCAmelCase_ = get_size_dict(__snake_case )
if "height" not in size or "width" not in size:
raise ValueError(F'Size dict must have keys \'height\' and \'width\'. Got {size.keys()}' )
return center_crop(__snake_case , size=(size['''height'''], size['''width''']) , data_format=__snake_case , **__snake_case )
def lowerCamelCase_ ( self : Optional[Any] , __snake_case : np.ndarray , __snake_case : Union[int, float] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : List[Any] , ):
return rescale(__snake_case , scale=__snake_case , data_format=__snake_case , **__snake_case )
def lowerCamelCase_ ( self : int , __snake_case : np.ndarray , __snake_case : Union[float, List[float]] , __snake_case : Union[float, List[float]] , __snake_case : Optional[Union[str, ChannelDimension]] = None , **__snake_case : Union[str, Any] , ):
return normalize(__snake_case , mean=__snake_case , std=__snake_case , data_format=__snake_case , **__snake_case )
def lowerCamelCase_ ( self : int , __snake_case : ImageInput , __snake_case : Optional[bool] = None , __snake_case : Optional[Dict[str, int]] = None , __snake_case : PILImageResampling = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Dict[str, int]] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[float] = None , __snake_case : Optional[bool] = None , __snake_case : Optional[Union[float, Iterable[float]]] = None , __snake_case : Optional[Union[float, Iterable[float]]] = None , __snake_case : Optional[TensorType] = None , __snake_case : ChannelDimension = ChannelDimension.FIRST , **__snake_case : List[str] , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(__snake_case , default_to_square=__snake_case )
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(__snake_case , param_name='''crop_size''' )
UpperCAmelCase_ = make_list_of_images(__snake_case )
if not valid_images(__snake_case ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(__snake_case ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(__snake_case , __snake_case , __snake_case ) for image in images]
if do_center_crop:
UpperCAmelCase_ = [self.center_crop(__snake_case , __snake_case ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(__snake_case , __snake_case ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(__snake_case , __snake_case , __snake_case ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(__snake_case , __snake_case ) for image in images]
UpperCAmelCase_ = {'''pixel_values''': images}
return BatchFeature(data=__snake_case , tensor_type=__snake_case )
| 177 | 1 |
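When only a `shortest_edge` is given, the resize step above first scales it by 256/224, so that a later 224-pixel center crop keeps the conventional margin around the subject. A sketch of that size arithmetic; the library's exact rounding may differ:

```python
# Scale shortest_edge by 256/224, resize the short side to that target, then
# center-crop back to the requested size (crop step omitted here).
def resize_short_side(height: int, width: int, shortest_edge: int) -> tuple:
    target = int((256 / 224) * shortest_edge)
    scale = target / min(height, width)
    return round(height * scale), round(width * scale)

print(resize_short_side(480, 640, 224))  # short side becomes 256 -> (256, 341)
```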
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'''facebook/data2vec-base-960h''': '''https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json''',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class _lowerCamelCase ( a ):
"""simple docstring"""
UpperCAmelCase_ : str ="data2vec-audio"
def __init__( self , UpperCAmelCase=32 , UpperCAmelCase=768 , UpperCAmelCase=12 , UpperCAmelCase=12 , UpperCAmelCase=3072 , UpperCAmelCase="gelu" , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase=0.1 , UpperCAmelCase=0.02 , UpperCAmelCase=1E-5 , UpperCAmelCase="gelu" , UpperCAmelCase=(512, 512, 512, 512, 512, 512, 512) , UpperCAmelCase=(5, 2, 2, 2, 2, 2, 2) , UpperCAmelCase=(10, 3, 3, 3, 3, 2, 2) , UpperCAmelCase=False , UpperCAmelCase=16 , UpperCAmelCase=19 , UpperCAmelCase=5 , UpperCAmelCase=0.05 , UpperCAmelCase=10 , UpperCAmelCase=2 , UpperCAmelCase=0.0 , UpperCAmelCase=10 , UpperCAmelCase=0 , UpperCAmelCase="sum" , UpperCAmelCase=False , UpperCAmelCase=False , UpperCAmelCase=256 , UpperCAmelCase=(512, 512, 512, 512, 1500) , UpperCAmelCase=(5, 3, 3, 1, 1) , UpperCAmelCase=(1, 2, 3, 1, 1) , UpperCAmelCase=512 , UpperCAmelCase=0 , UpperCAmelCase=1 , UpperCAmelCase=2 , UpperCAmelCase=False , UpperCAmelCase=3 , UpperCAmelCase=2 , UpperCAmelCase=3 , UpperCAmelCase=None , **UpperCAmelCase , ) -> str:
'''simple docstring'''
super().__init__(**UpperCAmelCase , pad_token_id=UpperCAmelCase , bos_token_id=UpperCAmelCase , eos_token_id=UpperCAmelCase )
__snake_case : Optional[Any] = hidden_size
__snake_case : Union[str, Any] = feat_extract_activation
__snake_case : Dict = list(UpperCAmelCase )
__snake_case : List[Any] = list(UpperCAmelCase )
__snake_case : Union[str, Any] = list(UpperCAmelCase )
__snake_case : List[Any] = conv_bias
__snake_case : Optional[Any] = num_conv_pos_embeddings
__snake_case : Tuple = num_conv_pos_embedding_groups
__snake_case : Optional[Any] = conv_pos_kernel_size
__snake_case : Tuple = len(self.conv_dim )
__snake_case : str = num_hidden_layers
__snake_case : str = intermediate_size
__snake_case : Optional[Any] = hidden_act
__snake_case : Union[str, Any] = num_attention_heads
__snake_case : Any = hidden_dropout
__snake_case : int = attention_dropout
__snake_case : List[str] = activation_dropout
__snake_case : Optional[Any] = feat_proj_dropout
__snake_case : int = final_dropout
__snake_case : Any = layerdrop
__snake_case : int = layer_norm_eps
__snake_case : List[str] = initializer_range
__snake_case : Any = vocab_size
__snake_case : List[str] = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__snake_case : Dict = mask_time_prob
__snake_case : Tuple = mask_time_length
__snake_case : Optional[Any] = mask_time_min_masks
__snake_case : Optional[int] = mask_feature_prob
__snake_case : List[Any] = mask_feature_length
__snake_case : List[Any] = mask_feature_min_masks
# ctc loss
__snake_case : int = ctc_loss_reduction
__snake_case : Optional[Any] = ctc_zero_infinity
# adapter
__snake_case : str = add_adapter
__snake_case : List[str] = adapter_kernel_size
__snake_case : Optional[Any] = adapter_stride
__snake_case : int = num_adapter_layers
__snake_case : int = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__snake_case : List[str] = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__snake_case : Tuple = list(UpperCAmelCase )
__snake_case : Dict = list(UpperCAmelCase )
__snake_case : Dict = list(UpperCAmelCase )
__snake_case : Union[str, Any] = xvector_output_dim
@property
def UpperCAmelCase ( self ) -> List[Any]:
'''simple docstring'''
return math.prod(self.conv_stride )
| 326 |
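The config above cross-checks that `conv_dim`, `conv_stride`, and `conv_kernel` all describe the same number of feature-extractor layers before accepting them. The validation boils down to:

```python
# The three conv tuples must each have one entry per feature-extractor layer.
def check_conv_config(conv_dim, conv_stride, conv_kernel):
    n = len(conv_dim)
    if not (len(conv_stride) == len(conv_kernel) == n):
        raise ValueError(
            f"len(conv_dim)={n}, len(conv_stride)={len(conv_stride)}, "
            f"len(conv_kernel)={len(conv_kernel)} must all match."
        )

check_conv_config((512,) * 7, (5, 2, 2, 2, 2, 2, 2), (10, 3, 3, 3, 3, 2, 2))  # ok
```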
def lowerCAmelCase__( lowercase : List[Any] , lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : str , lowercase : List[Any] , lowercase : List[str] ) -> int:
if index == r:
for j in range(lowercase ):
print(data[j] , end=" " )
print(" " )
return
    # When no more elements remain to put in data[]
if i >= n:
return
# current is included, put next at next location
__snake_case : Union[str, Any] = arr[i]
combination_util(lowercase , lowercase , lowercase , index + 1 , lowercase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(lowercase , lowercase , lowercase , lowercase , lowercase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def lowerCAmelCase__( lowercase : Any , lowercase : Tuple , lowercase : Union[str, Any] ) -> Optional[Any]:
# A temporary array to store all combination one by one
__snake_case : Tuple = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(lowercase , lowercase , lowercase , 0 , lowercase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
_UpperCamelCase = [10, 20, 30, 40, 50]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
| 326 | 1 |
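The recursive include/exclude walk above enumerates all r-element combinations of the array; the standard library provides the same enumeration directly:

```python
# itertools.combinations yields exactly the combinations printed above.
from itertools import combinations

for combo in combinations([10, 20, 30, 40, 50], 3):
    print(*combo)
```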
import os
import numpy
import onnx
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Optional[Any]:
lowercase__ = a.name
lowercase__ = b.name
lowercase__ = """"""
lowercase__ = """"""
lowercase__ = a == b
lowercase__ = name_a
lowercase__ = name_b
return res
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Dict:
for i, input_name in enumerate(node_proto.input ):
if input_name == name:
node_proto.input.insert(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
node_proto.input.pop(i + 1 )
if node_proto.op_type == "If":
_graph_replace_input_with(node_proto.attribute[0].g , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
_graph_replace_input_with(node_proto.attribute[1].g , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if node_proto.op_type == "Loop":
_graph_replace_input_with(node_proto.attribute[0].g , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> Tuple:
for n in graph_proto.node:
_node_replace_input_with(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) -> List[str]:
lowercase__ = list(model.graph.initializer )
lowercase__ = list(model_without_ext.graph.initializer )
for i, ref_i in ind_to_replace:
assert inits_with_data[i].name == inits[i].name
assert inits_with_data[ref_i].name == inits[ref_i].name
assert i > ref_i
lowercase__ = inits[i].name
lowercase__ = inits[ref_i].name
model_without_ext.graph.initializer.remove(inits[i] )
# for n in model.graph.node:
_graph_replace_input_with(model_without_ext.graph , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
def __UpperCamelCase (_SCREAMING_SNAKE_CASE ) -> str:
lowercase__ = os.path.dirname(__SCREAMING_SNAKE_CASE )
lowercase__ = os.path.basename(__SCREAMING_SNAKE_CASE )
lowercase__ = onnx.load(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
lowercase__ = list(model.graph.initializer )
lowercase__ = set()
lowercase__ = {}
lowercase__ = []
lowercase__ = 0
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
if i in dup_set:
continue
for j in range(i + 1 , len(__SCREAMING_SNAKE_CASE ) ):
if j in dup_set:
continue
if _is_equal_tensor_proto(inits[i] , inits[j] ):
dup_set.add(__SCREAMING_SNAKE_CASE )
dup_set.add(__SCREAMING_SNAKE_CASE )
lowercase__ = inits[j].data_type
lowercase__ = numpy.prod(inits[j].dims )
if dtype == 1:
mem_size *= 4
elif dtype == 6:
mem_size *= 4
elif dtype == 7 or dtype == 11:
mem_size *= 8
else:
print('unexpected data type: ' , __SCREAMING_SNAKE_CASE )
total_reduced_size += mem_size
lowercase__ = inits[i].name
lowercase__ = inits[j].name
if name_i in dup_map:
dup_map[name_i].append(__SCREAMING_SNAKE_CASE )
else:
lowercase__ = [name_j]
ind_to_replace.append((j, i) )
print('total reduced size: ' , total_reduced_size / 1024 / 1024 / 1024 , 'GB' )
lowercase__ = sorted(__SCREAMING_SNAKE_CASE )
_remove_dup_initializers_from_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowercase__ = """optimized_""" + model_file_name
lowercase__ = os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
onnx.save(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return new_model
| 370 |
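The ONNX pass above compares initializers pairwise and rewires later duplicates onto the first copy they match. A generic sketch of the dedup-and-remap idea for numpy arrays, using the raw bytes as a cheap equality key:

```python
# Keep the first copy of each equal array and remap later indices onto it.
import numpy as np

def dedup_arrays(arrays):
    seen, remap = {}, []
    for idx, arr in enumerate(arrays):
        key = (arr.dtype.str, arr.shape, arr.tobytes())
        remap.append(seen.setdefault(key, idx))
    return remap  # remap[i] == i for kept arrays, else the kept twin's index

arrs = [np.ones(3), np.zeros(3), np.ones(3)]
assert dedup_arrays(arrs) == [0, 1, 0]
```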
import unittest
import numpy as np
from transformers import DistilBertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.distilbert.modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
)
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
def __init__( self : List[str] , a : Dict , a : Optional[int]=13 , a : int=7 , a : List[str]=True , a : Any=True , a : Dict=True , a : List[Any]=True , a : List[str]=99 , a : Dict=32 , a : List[str]=5 , a : Tuple=4 , a : Optional[int]=37 , a : Union[str, Any]="gelu" , a : Optional[Any]=0.1 , a : Optional[int]=0.1 , a : Optional[Any]=512 , a : Dict=16 , a : Any=2 , a : Tuple=0.02 , a : Optional[Any]=4 , )-> int:
"""simple docstring"""
lowercase__ = parent
lowercase__ = batch_size
lowercase__ = seq_length
lowercase__ = is_training
lowercase__ = use_attention_mask
lowercase__ = use_token_type_ids
lowercase__ = use_labels
lowercase__ = vocab_size
lowercase__ = hidden_size
lowercase__ = num_hidden_layers
lowercase__ = num_attention_heads
lowercase__ = intermediate_size
lowercase__ = hidden_act
lowercase__ = hidden_dropout_prob
lowercase__ = attention_probs_dropout_prob
lowercase__ = max_position_embeddings
lowercase__ = type_vocab_size
lowercase__ = type_sequence_label_size
lowercase__ = initializer_range
lowercase__ = num_choices
def SCREAMING_SNAKE_CASE_ ( self : Union[str, Any] )-> Union[str, Any]:
"""simple docstring"""
lowercase__ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowercase__ = None
if self.use_attention_mask:
lowercase__ = random_attention_mask([self.batch_size, self.seq_length] )
lowercase__ = DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , tie_weights_=a , )
return config, input_ids, attention_mask
def SCREAMING_SNAKE_CASE_ ( self : Optional[Any] )-> Any:
"""simple docstring"""
lowercase__ = self.prepare_config_and_inputs()
lowercase__ , lowercase__ , lowercase__ = config_and_inputs
lowercase__ = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class SCREAMING_SNAKE_CASE (UpperCAmelCase , unittest.TestCase ):
_UpperCamelCase : List[Any] = (
(
FlaxDistilBertModel,
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def SCREAMING_SNAKE_CASE_ ( self : Dict )-> List[str]:
"""simple docstring"""
lowercase__ = FlaxDistilBertModelTester(self )
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> int:
"""simple docstring"""
for model_class_name in self.all_model_classes:
lowercase__ = model_class_name.from_pretrained('distilbert-base-uncased' )
lowercase__ = model(np.ones((1, 1) ) )
self.assertIsNotNone(a )
@require_flax
class SCREAMING_SNAKE_CASE (unittest.TestCase ):
@slow
def SCREAMING_SNAKE_CASE_ ( self : List[str] )-> List[str]:
"""simple docstring"""
lowercase__ = FlaxDistilBertModel.from_pretrained('distilbert-base-uncased' )
lowercase__ = np.array([[0, 345, 232, 328, 740, 140, 1_695, 69, 6_078, 1_588, 2]] )
lowercase__ = np.array([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowercase__ = model(a , attention_mask=a )[0]
lowercase__ = (1, 11, 768)
self.assertEqual(output.shape , a )
lowercase__ = np.array([[[-0.1639, 0.3299, 0.1648], [-0.1746, 0.3289, 0.1710], [-0.1884, 0.3357, 0.1810]]] )
self.assertTrue(jnp.allclose(output[:, 1:4, 1:4] , a , atol=1E-4 ) )
| 269 | 0 |
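The slow test above compares a slice of the model output against hard-coded values with an absolute tolerance. The bare assertion pattern, with made-up values rather than real DistilBERT outputs:

```python
# Slice-vs-expected closeness check, as in the test's final assertion.
import numpy as np

output = np.zeros((1, 11, 768))
output[0, 1:4, 1:4] = 0.1234
expected_slice = np.full((1, 3, 3), 0.1234)
assert np.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4)
```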
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class UpperCamelCase__ ( lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : "DiagonalGaussianDistribution"
class UpperCamelCase__ ( lowerCAmelCase_ , lowerCAmelCase_ ):
'''simple docstring'''
__snake_case : str = True
    @register_to_config
    def __init__(self, in_channels: int = 3, out_channels: int = 3, down_block_types: Tuple[str] = ("DownEncoderBlock2D",), up_block_types: Tuple[str] = ("UpDecoderBlock2D",), block_out_channels: Tuple[int] = (64,), layers_per_block: int = 1, act_fn: str = "silu", latent_channels: int = 4, norm_num_groups: int = 32, sample_size: int = 32, scaling_factor: float = 0.18215) -> None:
        '''simple docstring'''
        super().__init__()
        # pass init params to Encoder
        self.encoder = Encoder(in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True)
        # pass init params to Decoder
        self.decoder = Decoder(in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn)
        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)
        self.use_slicing = False
        self.use_tiling = False
        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False) -> None:
        '''simple docstring'''
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True) -> None:
        '''simple docstring'''
        self.use_tiling = use_tiling

    def disable_tiling(self) -> None:
        '''simple docstring'''
        self.enable_tiling(False)

    def enable_slicing(self) -> None:
        '''simple docstring'''
        self.use_slicing = True

    def disable_slicing(self) -> None:
        '''simple docstring'''
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        '''simple docstring'''
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]) -> None:
        '''simple docstring'''
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes.")

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self) -> None:
        '''simple docstring'''
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        '''simple docstring'''
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)
        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)
        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        '''simple docstring'''
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        '''simple docstring'''
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample
        if not return_dict:
            return (decoded,)
        return DecoderOutput(sample=decoded)
    def blend_v(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        '''simple docstring'''
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a: torch.Tensor, b: torch.Tensor, blend_extent: int) -> torch.Tensor:
        '''simple docstring'''
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        '''simple docstring'''
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent
        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)
        if not return_dict:
            return (posterior,)
        return AutoencoderKLOutput(latent_dist=posterior)
    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        '''simple docstring'''
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent
        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))
        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
    def forward(self, sample: torch.FloatTensor, sample_posterior: bool = False, return_dict: bool = True, generator: Optional[torch.Generator] = None) -> Union[DecoderOutput, torch.FloatTensor]:
        '''simple docstring'''
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample
        if not return_dict:
            return (dec,)
        return DecoderOutput(sample=dec)
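# A numpy illustration of the linear crossfade that blend_v / blend_h above apply on the
# overlapping border of neighbouring tiles: across `blend_extent` positions the result
# ramps from tile `a` to tile `b`, hiding seams once the tiles are concatenated. This is
# the same arithmetic reduced to 1-D arrays, not part of the diffusers model itself.
import numpy as np

def blend_1d(a, b, blend_extent):
    blend_extent = min(len(a), len(b), blend_extent)
    out = b.copy()
    for y in range(blend_extent):
        # weight on `a` falls linearly from 1 toward 0 as y approaches blend_extent
        out[y] = a[-blend_extent + y] * (1 - y / blend_extent) + b[y] * (y / blend_extent)
    return out

left_tile = np.ones(8)    # trailing edge of the previous tile
right_tile = np.zeros(8)  # leading edge of the current tile
assert np.allclose(blend_1d(left_tile, right_tile, 4), [1.0, 0.75, 0.5, 0.25, 0.0, 0.0, 0.0, 0.0])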
| 296 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilevit_config(mobilevit_name) -> MobileViTConfig:
    '''simple docstring'''
    config = MobileViTConfig()
    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0
    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1000
        filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def rename_key(name, base_model=False) -> str:
    '''simple docstring'''
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")
    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")
    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")
    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")
    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")
    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")
    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name
    return name
def convert_state_dict(orig_state_dict, model, base_model=False) -> dict:
    '''simple docstring'''
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val
    return orig_state_dict
def prepare_img() -> Image.Image:
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False) -> None:
    '''simple docstring'''
    config = get_mobilevit_config(mobilevit_name)
    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")
    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()
    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)
    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)
        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ])
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ])
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1000)
        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }
        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--mobilevit_name""",
default="""mobilevit_s""",
type=str,
help=(
"""Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"""
""" 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."""
),
)
parser.add_argument(
"""--checkpoint_path""", required=True, type=str, help="""Path to the original state dict (.pt file)."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", required=True, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
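# A small torch sketch of the fused-QKV split done in convert_state_dict above: the original
# checkpoint stores the query/key/value projection as one (3 * dim, dim) matrix, and the
# converter slices it into three (dim, dim) blocks along dim 0. Sizes here are illustrative.
import torch as _torch

_dim = 4
_qkv_weight = _torch.randn(3 * _dim, _dim)  # fused projection, as found in the checkpoint
_query = _qkv_weight[:_dim, :]
_key = _qkv_weight[_dim : _dim * 2, :]
_value = _qkv_weight[-_dim:, :]
# the three slices partition the fused matrix exactly
assert _torch.equal(_torch.cat([_query, _key, _value], dim=0), _qkv_weight)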
| 296 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range)
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder(self):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_model_as_decoder(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask):
        config.add_cross_attention = True
        model = TFEsmModel(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        result = model(inputs)
        inputs = [input_ids, input_mask]
        result = model(inputs, encoder_hidden_states=encoder_hidden_states)
        # Also check the case where encoder outputs are not passed
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFEsmForMaskedLM(config=config)
        result = model([input_ids, input_mask])
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFEsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_as_decoder(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFEsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @unittest.skip('Protein models do not support embedding resizing.')
    def test_resize_token_embeddings(self):
        pass

    @unittest.skip('Protein models do not support embedding resizing.')
    def test_save_load_after_resize_token_embeddings(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
            if model_class is TFEsmForMaskedLM:
                # Output embedding test differs from the main test because they're a matrix, not a layer
                name = model.get_bias()
                assert isinstance(name, dict)
                for k, v in name.items():
                    assert isinstance(v, tf.Variable)
            else:
                x = model.get_output_embeddings()
                assert x is None
                name = model.get_bias()
                assert name is None
@require_tf
class TFEsmModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        expected_shape = [1, 6, 33]
        self.assertEqual(list(output.numpy().shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-2))
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ])
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
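# A minimal, dependency-free sketch of the tester-fixture pattern exercised above: one
# helper object builds a shared (config, inputs) pair and asserts through `parent`, so
# every model head is checked by the same machinery. All names below are hypothetical
# stand-ins, not transformers classes.
import unittest as _unittest

class _ToyModel:
    def __init__(self, config):
        self.hidden_size = config["hidden_size"]

    def __call__(self, inputs):
        # pretend forward pass: one hidden vector per input row
        return [[0.0] * self.hidden_size for _ in inputs["input_ids"]]

class _ToyModelTester:
    def __init__(self, parent):
        self.parent = parent

    def prepare_config_and_inputs(self):
        return {"hidden_size": 8}, {"input_ids": [[1, 2, 3], [4, 5, 6]]}

    def create_and_check_model(self, config, inputs):
        result = _ToyModel(config)(inputs)
        self.parent.assertEqual(len(result), len(inputs["input_ids"]))

class _ToyModelTest(_unittest.TestCase):
    def setUp(self):
        self.model_tester = _ToyModelTester(self)

    def test_model(self):
        self.model_tester.create_and_check_model(*self.model_tester.prepare_config_and_inputs())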
| 360 |
'''simple docstring'''
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
    DataCollatorForSeq2Seq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 55 | 0 |
"""simple docstring"""
def different_signs(num1: int, num2: int) -> bool:
    """
    Return True iff num1 and num2 have opposite signs.

    >>> different_signs(1, -1)
    True
    >>> different_signs(1, 1)
    False
    >>> different_signs(-2, -13)
    False
    """
    return num1 ^ num2 < 0
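# Why the XOR trick works: Python ints behave like infinite two's-complement bit strings,
# so the sign bit of num1 ^ num2 is set exactly when the operands' sign bits differ, and a
# set sign bit makes the result negative. A few spot checks (illustrative additions, not
# part of the original module):
assert different_signs(3, -7) is True
assert different_signs(-3, -7) is False
assert different_signs(3, 7) is False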
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
| 316 | 1 |
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    '''simple docstring'''
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1_000_000) -> int:
    '''simple docstring'''
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split('b')[1]):
            total += i
    return total
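# A one-value illustration of the double-base test above: 585 reads the same forwards and
# backwards in base 10, and its binary form '1001001001' is also a palindrome, so 585 is
# one of the terms that solution() accumulates.
assert is_palindrome(585) and is_palindrome(bin(585).split('b')[1])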
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 145 |
import unittest
from huggingface_hub import hf_hub_download
from transformers import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING, VideoMAEFeatureExtractor
from transformers.pipelines import VideoClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_decord,
require_tf,
require_torch,
require_torch_or_tf,
require_vision,
)
from .test_pipelines_common import ANY
@is_pipeline_test
@require_torch_or_tf
@require_vision
@require_decord
class VideoClassificationPipelineTests(unittest.TestCase):
'''simple docstring'''
    model_mapping = MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        example_video_filepath = hf_hub_download(
            repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset')
        video_classifier = VideoClassificationPipeline(model=model, image_processor=processor, top_k=2)
        examples = [
            example_video_filepath,
            'https://huggingface.co/datasets/nateraw/video-demo/resolve/main/archery.mp4',
        ]
        return video_classifier, examples

    def run_pipeline_test(self, video_classifier, examples):
        for example in examples:
            outputs = video_classifier(example)
            self.assertEqual(
                outputs,
                [
                    {'score': ANY(float), 'label': ANY(str)},
                    {'score': ANY(float), 'label': ANY(str)},
                ],
            )
    @require_torch
    def test_small_model_pt(self):
        small_model = 'hf-internal-testing/tiny-random-VideoMAEForVideoClassification'
        small_feature_extractor = VideoMAEFeatureExtractor(
            size={'shortest_edge': 10}, crop_size={'height': 10, 'width': 10})
        video_classifier = pipeline(
            'video-classification', model=small_model, feature_extractor=small_feature_extractor, frame_sampling_rate=4)
        video_file_path = hf_hub_download(repo_id='nateraw/video-demo', filename='archery.mp4', repo_type='dataset')
        outputs = video_classifier(video_file_path, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
        )
        outputs = video_classifier(
            [
                video_file_path,
                video_file_path,
            ],
            top_k=2,
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
                [{'score': 0.5199, 'label': 'LABEL_0'}, {'score': 0.4801, 'label': 'LABEL_1'}],
            ],
        )

    @require_tf
    def test_small_model_tf(self):
        pass
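# Typical usage of the API exercised above (illustrative; requires torch/decord and network
# access, so it is shown as a comment rather than executable code; the checkpoint name is
# the tiny test model already used in this file):
#
#   classifier = pipeline(
#       "video-classification",
#       model="hf-internal-testing/tiny-random-VideoMAEForVideoClassification",
#   )
#   predictions = classifier("archery.mp4", top_k=2)
#   # -> [{"score": ..., "label": ...}, {"score": ..., "label": ...}]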
| 145 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
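# A minimal sketch of the lazy-import idea behind _LazyModule: PEP 562 lets a module define
# __getattr__, so the heavy submodule is only imported when one of its attributes is first
# touched. Stand-alone illustration with a made-up mapping; this is not the transformers
# implementation.
import importlib

_lazy_attr_to_module = {"dumps": "json", "loads": "json"}

def __getattr__(name):
    if name in _lazy_attr_to_module:
        module = importlib.import_module(_lazy_attr_to_module[name])
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")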
| 50 |
import argparse
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import GLPNConfig, GLPNForDepthEstimation, GLPNImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def rename_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if key.startswith('module.encoder'):
            key = key.replace('module.encoder', 'glpn.encoder')
        if key.startswith('module.decoder'):
            key = key.replace('module.decoder', 'decoder.stages')
        if "patch_embed" in key:
            # replace for example patch_embed1 by patch_embeddings.0
            idx = key[key.find('patch_embed') + len('patch_embed')]
            key = key.replace(f"patch_embed{idx}", f"patch_embeddings.{int(idx)-1}")
        if "norm" in key:
            key = key.replace('norm', 'layer_norm')
        if "glpn.encoder.layer_norm" in key:
            # replace for example layer_norm1 by layer_norm.0
            idx = key[key.find('glpn.encoder.layer_norm') + len('glpn.encoder.layer_norm')]
            key = key.replace(f"layer_norm{idx}", f"layer_norm.{int(idx)-1}")
        if "layer_norm1" in key:
            key = key.replace('layer_norm1', 'layer_norm_1')
        if "layer_norm2" in key:
            key = key.replace('layer_norm2', 'layer_norm_2')
        if "block" in key:
            # replace for example block1 by block.0
            idx = key[key.find('block') + len('block')]
            key = key.replace(f"block{idx}", f"block.{int(idx)-1}")
        if "attn.q" in key:
            key = key.replace('attn.q', 'attention.self.query')
        if "attn.proj" in key:
            key = key.replace('attn.proj', 'attention.output.dense')
        if "attn" in key:
            key = key.replace('attn', 'attention.self')
        if "fc1" in key:
            key = key.replace('fc1', 'dense1')
        if "fc2" in key:
            key = key.replace('fc2', 'dense2')
        if "linear_pred" in key:
            key = key.replace('linear_pred', 'classifier')
        if "linear_fuse" in key:
            key = key.replace('linear_fuse.conv', 'linear_fuse')
            key = key.replace('linear_fuse.bn', 'batch_norm')
        if "linear_c" in key:
            # replace for example linear_c4 by linear_c.3
            idx = key[key.find('linear_c') + len('linear_c')]
            key = key.replace(f"linear_c{idx}", f"linear_c.{int(idx)-1}")
        if "bot_conv" in key:
            key = key.replace('bot_conv', '0.convolution')
        if "skip_conv1" in key:
            key = key.replace('skip_conv1', '1.convolution')
        if "skip_conv2" in key:
            key = key.replace('skip_conv2', '2.convolution')
        if "fusion1" in key:
            key = key.replace('fusion1', '1.fusion')
        if "fusion2" in key:
            key = key.replace('fusion2', '2.fusion')
        if "fusion3" in key:
            key = key.replace('fusion3', '3.fusion')
        if "fusion" in key and "conv" in key:
            key = key.replace('conv', 'convolutional_layer')
        if key.startswith('module.last_layer_depth'):
            key = key.replace('module.last_layer_depth', 'head.head')
        new_state_dict[key] = value
    return new_state_dict
def read_in_k_v(state_dict, config):
    # for each of the encoder blocks:
    for i in range(config.num_encoder_blocks):
        for j in range(config.depths[i]):
            # read in weights + bias of keys and values (which is a single matrix in the original implementation)
            kv_weight = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.weight")
            kv_bias = state_dict.pop(f"glpn.encoder.block.{i}.{j}.attention.self.kv.bias")
            # next, add keys and values (in that order) to the state dict
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.weight"] = kv_weight[
                : config.hidden_sizes[i], :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.key.bias"] = kv_bias[: config.hidden_sizes[i]]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.weight"] = kv_weight[
                config.hidden_sizes[i] :, :
            ]
            state_dict[f"glpn.encoder.block.{i}.{j}.attention.self.value.bias"] = kv_bias[config.hidden_sizes[i] :]
def prepare_img():
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    image = Image.open(requests.get(url, stream=True).raw)
    return image
@torch.no_grad()
def convert_glpn_checkpoint(checkpoint_path, pytorch_dump_folder_path, push_to_hub=False, model_name=None):
    config = GLPNConfig(hidden_sizes=[64, 128, 320, 512], decoder_hidden_size=64, depths=[3, 8, 27, 3])
    # load image processor (only resize + rescale)
    image_processor = GLPNImageProcessor()
    # prepare image
    image = prepare_img()
    pixel_values = image_processor(images=image, return_tensors='pt').pixel_values
    logger.info('Converting model...')
    # load original state dict
    state_dict = torch.load(checkpoint_path, map_location=torch.device('cpu'))
    # rename keys
    state_dict = rename_keys(state_dict)
    # key and value matrices need special treatment
    read_in_k_v(state_dict, config)
    # create HuggingFace model and load state dict
    model = GLPNForDepthEstimation(config)
    model.load_state_dict(state_dict)
    model.eval()
    # forward pass
    outputs = model(pixel_values)
    predicted_depth = outputs.predicted_depth
    # verify output
    if model_name is not None:
        if "nyu" in model_name:
            expected_slice = torch.tensor(
                [[4.4147, 4.0873, 4.0673], [3.7890, 3.2881, 3.1525], [3.7674, 3.5423, 3.4913]])
        elif "kitti" in model_name:
            expected_slice = torch.tensor(
                [[3.4291, 2.7865, 2.5151], [3.2841, 2.7021, 2.3502], [3.1147, 2.4625, 2.2481]])
        else:
            raise ValueError(f"Unknown model name: {model_name}")
        expected_shape = torch.Size([1, 480, 640])
        assert predicted_depth.shape == expected_shape
        assert torch.allclose(predicted_depth[0, :3, :3], expected_slice, atol=1e-4)
        print('Looks ok!')
    # finally, push to hub if required
    if push_to_hub:
        logger.info('Pushing model and image processor to the hub...')
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add model', use_temp_dir=True)
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path, model_name), organization='nielsr', commit_message='Add image processor', use_temp_dir=True)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_path""",
default=None,
type=str,
help="""Path to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
parser.add_argument(
"""--model_name""",
default="""glpn-kitti""",
type=str,
help="""Name of the model in case you're pushing to the hub.""",
)
    args = parser.parse_args()
convert_glpn_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name)
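# A tiny stand-alone sketch of the renaming pass above: every checkpoint key is pushed
# through an ordered chain of substring substitutions and stored under its new name. The
# substitutions are abridged from rename_keys above; the example key itself is made up.
_demo_renames = [("module.encoder", "glpn.encoder"), ("attn.q", "attention.self.query")]

def _demo_rename(key):
    for old, new in _demo_renames:
        key = key.replace(old, new)
    return key

_old_sd = OrderedDict([("module.encoder.block.0.attn.q.weight", 0.0)])
_new_sd = OrderedDict((_demo_rename(k), v) for k, v in _old_sd.items())
assert "glpn.encoder.block.0.attention.self.query.weight" in _new_sd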
| 50 | 1 |
import warnings
from ...utils import logging
from .image_processing_flava import FlavaImageProcessor
logger = logging.get_logger(__name__)
class FlavaFeatureExtractor(FlavaImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            'The class FlavaFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
            ' use FlavaImageProcessor instead.',
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 216 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_encoder_decoder": ["EncoderDecoderConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_encoder_decoder"] = ["EncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_encoder_decoder"] = ["TFEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_encoder_decoder"] = ["FlaxEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_encoder_decoder import EncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encoder_decoder import EncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_encoder_decoder import TFEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_encoder_decoder import FlaxEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 216 | 1 |
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1
                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start]))
                self.set_position(positions[start], temp)
                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]
        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)
            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    """simple docstring"""
    heap = Heap()
    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []
    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)
    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)
    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions)
                    nbr_tv[neighbor] = vertex
    return tree_edges
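# Illustrative driver for prisms_algorithm (kept as a comment so the interactive CLI below
# stays the only executable entry point): for the weighted, undirected graph with edges
# (0-1, w=1), (1-2, w=2), (1-3, w=3), (0-2, w=4), (2-3, w=5), the expected minimum
# spanning tree is [(0, 1), (1, 2), (1, 3)].
#
#   g = defaultdict(list)
#   for u, v, w in [(0, 1, 1), (1, 2, 2), (1, 3, 3), (0, 2, 4), (2, 3, 5)]:
#       g[u].append([v, w])
#       g[v].append([u, w])
#   print(prisms_algorithm(g))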
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowerCAmelCase : Optional[Any] = int(input("Enter number of edges: ").strip())
_lowerCAmelCase : int = defaultdict(list)
for _ in range(edges_number):
_lowerCAmelCase : Optional[Any] = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 169 |
import math
class Graph:
    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        self.w = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # adjacency matrix for weight
        self.dp = [
            [math.inf for j in range(0, n)] for i in range(0, n)
        ]  # dp[i][j] stores minimum distance from i to j

    def add_edge(self, u, v, w):
        self.dp[u][v] = w

    def floyd_warshall(self):
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        return self.dp[u][v]
if __name__ == "__main__":
    graph = Graph(5)
graph.add_edge(0, 2, 9)
graph.add_edge(0, 4, 1_0)
graph.add_edge(1, 3, 5)
graph.add_edge(2, 3, 7)
graph.add_edge(3, 0, 1_0)
graph.add_edge(3, 1, 2)
graph.add_edge(3, 2, 1)
graph.add_edge(3, 4, 6)
graph.add_edge(4, 1, 3)
graph.add_edge(4, 2, 4)
graph.add_edge(4, 3, 9)
graph.floyd_warshall()
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
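    # The triple loop in floyd_warshall applies the recurrence
    #     dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j])
    # over every intermediate vertex k. For this demo graph the two queries above print
    # 11 (path 1 -> 3 -> 4, cost 5 + 6) and 16 (path 0 -> 2 -> 3, cost 9 + 7).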
| 169 | 1 |
"""simple docstring"""
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(graph, path, 1) else []
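if __name__ == "__main__":
    # Illustrative run (not part of the original module): a 5-vertex graph containing the
    # Hamiltonian cycle 0 -> 1 -> 2 -> 4 -> 3 -> 0, given as an adjacency matrix.
    demo_graph = [
        [0, 1, 0, 1, 0],
        [1, 0, 1, 1, 1],
        [0, 1, 0, 0, 1],
        [1, 1, 0, 0, 1],
        [0, 1, 1, 1, 0],
    ]
    print(hamilton_cycle(demo_graph))  # [0, 1, 2, 4, 3, 0]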
| 80 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_table_transformer": [
        "TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "TableTransformerConfig",
        "TableTransformerOnnxConfig",
    ]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
        "TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TableTransformerForObjectDetection",
        "TableTransformerModel",
        "TableTransformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 80 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class LevitImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {'shortest_edge': 18}
        crop_size = crop_size if crop_size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "do_center_crop": self.do_center_crop,
            "size": self.size,
            "crop_size": self.crop_size,
        }
@require_torch
@require_vision
class LevitImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LevitImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = LevitImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
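# Minimal standalone usage sketch for the processor exercised above; the kwargs
# mirror the tester defaults and the printed shape is illustrative, not asserted.
if __name__ == "__main__":
    processor = LevitImageProcessor(size={"shortest_edge": 18}, crop_size={"height": 18, "width": 18})
    demo_image = Image.fromarray(np.zeros((30, 40, 3), dtype=np.uint8))
    print(processor(demo_image, return_tensors="pt").pixel_values.shape)  # torch.Size([1, 3, 18, 18])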
| 336 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax.numpy as jnp
from jax import random
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .scheduling_utils_flax import FlaxSchedulerMixin
@flax.struct.dataclass
class KarrasVeSchedulerState:
    # setable values
    num_inference_steps: Optional[int] = None
    timesteps: Optional[jnp.ndarray] = None
    schedule: Optional[jnp.ndarray] = None  # sigma(t_i)

    @classmethod
    def create(cls):
        return cls()


@dataclass
class FlaxKarrasVeOutput(BaseOutput):
    prev_sample: jnp.ndarray
    derivative: jnp.ndarray
    state: KarrasVeSchedulerState


class FlaxKarrasVeScheduler(FlaxSchedulerMixin, ConfigMixin):
    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        pass

    def create_state(self):
        return KarrasVeSchedulerState.create()

    def set_timesteps(self, state, num_inference_steps, shape=()):
        timesteps = jnp.arange(0, num_inference_steps)[::-1].copy()
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in timesteps
        ]
        return state.replace(
            num_inference_steps=num_inference_steps,
            schedule=jnp.array(schedule, dtype=jnp.float32),
            timesteps=timesteps,
        )

    def add_noise_to_input(self, state, sample, sigma, key):
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / state.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        key = random.split(key, num=1)
        eps = self.config.s_noise * random.normal(key=key, shape=sample.shape)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)
        return sample_hat, sigma_hat

    def step(self, state, model_output, sigma_hat, sigma_prev, sample_hat, return_dict=True):
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def step_correct(self, state, model_output, sigma_hat, sigma_prev, sample_hat, sample_prev, derivative, return_dict=True):
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative, state)
        return FlaxKarrasVeOutput(prev_sample=sample_prev, derivative=derivative, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        raise NotImplementedError()
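# Minimal state-handling sketch for the scheduler above; purely illustrative.
if __name__ == "__main__":
    scheduler = FlaxKarrasVeScheduler()
    state = scheduler.create_state()
    state = scheduler.set_timesteps(state, num_inference_steps=10)
    # state.schedule now holds the sigma(t_i) values consumed by
    # add_noise_to_input/step; state.timesteps runs from 9 down to 0
    print(state.timesteps, state.schedule.shape)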
| 336 | 1 |
"""simple docstring"""
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
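# Hypothetical smoke test of the helper above; torch availability is assumed,
# and the class names follow this file's imports (upstream these correspond to
# the M2M100 classes).
if __name__ == "__main__":
    cfg = MaMaaaConfig(
        vocab_size=99, pad_token_id=1, encoder_layers=2, decoder_layers=2,
        encoder_attention_heads=4, decoder_attention_heads=4,
    )
    ids = torch.randint(2, 99, (2, 7))
    demo_inputs = prepare_mam_aaa_inputs_dict(cfg, ids, ids)
    assert demo_inputs["attention_mask"].shape == ids.shape
    assert demo_inputs["head_mask"].shape == (2, 4)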
class MaMaaaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)

        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[
            0
        ]

        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "TranslationPipelineTests":
            # Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
            # `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
            return True

        return False
    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))

            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)

            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)

            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1E-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
    @cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)

        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))

    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")

        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]

        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")

        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )

        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]

        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
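# Hedged end-to-end usage sketch mirroring the test above; Hub access is assumed,
# and class names follow this file's imports (the M2M100 classes upstream).
if __name__ == "__main__":
    tok = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
    mdl = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    batch = tok("La vie est belle.", return_tensors="pt")
    out = mdl.generate(**batch, forced_bos_token_id=tok.get_lang_id("en"))
    print(tok.batch_decode(out, skip_special_tokens=True))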
| 369 |
from .imports import is_rich_available
if is_rich_available():
from rich.traceback import install
install(show_locals=False)
else:
raise ModuleNotFoundError('''To use the rich extension, install rich with `pip install rich`''')
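# Illustrative effect of the hook above: once this module is imported, uncaught
# exceptions are rendered by rich (syntax-highlighted, locals hidden). The module
# path below is an assumption about where this file lives in the package.
#
#   import accelerate.utils.rich  # noqa: F401  -- imported for its side effect
#   1 / 0                         # the traceback is now pretty-printed by rich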
| 261 | 0 |
"""simple docstring"""
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1),
            Create(cpu_right_col, run_time=1),
            Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.",
            font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model",
            font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])

        step_1.move_to([2, 2, 0])
        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))

        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 177 | """simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RoFormerConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRoFormerModel,
            FlaxRoFormerForMaskedLM,
            FlaxRoFormerForSequenceClassification,
            FlaxRoFormerForTokenClassification,
            FlaxRoFormerForMultipleChoice,
            FlaxRoFormerForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)

        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]]
        )
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
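# Minimal inference sketch matching the integration test above; flax and the
# Hub checkpoint are assumed to be available.
if __name__ == "__main__":
    mlm = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
    logits = mlm(jnp.array([[0, 1, 2, 3, 4, 5]]))[0]
    print(logits.shape)  # (1, 6, 50000)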
| 177 | 1 |
"""simple docstring"""
import heapq
import sys
import numpy as np
TPos = tuple[int, int]
class PriorityQueue:
    def __init__(self):
        self.elements = []
        self.set = set()

    def minkey(self):
        if not self.empty():
            return self.elements[0][0]
        else:
            return float("inf")

    def empty(self):
        return len(self.elements) == 0

    def put(self, item, priority):
        if item not in self.set:
            heapq.heappush(self.elements, (priority, item))
            self.set.add(item)
        else:
            # update the priority of an item that is already queued
            # print("update", item)
            temp = []
            (pri, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pri, x))
                (pri, x) = heapq.heappop(self.elements)
            temp.append((priority, item))
            for (pro, xxx) in temp:
                heapq.heappush(self.elements, (pro, xxx))

    def remove_element(self, item):
        if item in self.set:
            self.set.remove(item)
            temp = []
            (pro, x) = heapq.heappop(self.elements)
            while x != item:
                temp.append((pro, x))
                (pro, x) = heapq.heappop(self.elements)
            for (prito, yyy) in temp:
                heapq.heappush(self.elements, (prito, yyy))

    def top_show(self):
        return self.elements[0][1]

    def get(self):
        (priority, item) = heapq.heappop(self.elements)
        self.set.remove(item)
        return (priority, item)
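# Quick behavioral check of the updatable queue above (illustrative values):
# re-putting an existing item replaces its priority instead of duplicating it.
if __name__ == "__main__":
    pq = PriorityQueue()
    pq.put((0, 0), 5)
    pq.put((1, 1), 3)
    pq.put((0, 0), 1)  # update: (0, 0) now has priority 1
    assert pq.minkey() == 1 and pq.top_show() == (0, 0)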
def consistent_heuristic(p: TPos, goal: TPos):
    # euclidean distance
    a = np.array(p)
    b = np.array(goal)
    return np.linalg.norm(a - b)


def heuristic_1(p: TPos, goal: TPos):
    # integer division by time variable
    return consistent_heuristic(p, goal) // t


def heuristic_2(p: TPos, goal: TPos):
    # manhattan distance
    return abs(p[0] - goal[0]) + abs(p[1] - goal[1])


def key(start: TPos, i: int, goal: TPos, g_function: dict[TPos, float]):
    ans = g_function[start] + W1 * heuristics[i](start, goal)
    return ans
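# Worked check of key() under this module's defaults (W1 = 1, t = 1), assuming
# the heuristics dict defined further down: heuristics[0] is euclidean and
# heuristics[2] is manhattan, so for start (0, 0), goal (3, 4) and g = 0:
if __name__ == "__main__":
    demo_g = {(0, 0): 0.0}
    assert key((0, 0), 0, (3, 4), demo_g) == 5.0  # 0 + 1 * sqrt(3**2 + 4**2)
    assert key((0, 0), 2, (3, 4), demo_g) == 7.0  # 0 + 1 * (3 + 4)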
def do_something(back_pointer, goal, start):
    grid = np.chararray((n, n))
    for i in range(n):
        for j in range(n):
            grid[i][j] = "*"

    for i in range(n):
        for j in range(n):
            if (j, (n - 1) - i) in blocks:
                grid[i][j] = "#"

    grid[0][(n - 1)] = "-"
    x = back_pointer[goal]
    while x != start:
        (x_c, y_c) = x
        # print(x)
        grid[(n - 1) - y_c][x_c] = "-"
        x = back_pointer[x]
    grid[(n - 1)][0] = "-"

    for i in range(n):
        for j in range(n):
            if (i, j) == (0, n - 1):
                print(grid[i][j], end=" ")
                print("<-- End position", end=" ")
            else:
                print(grid[i][j], end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
    print("PATH TAKEN BY THE ALGORITHM IS:-")
    x = back_pointer[goal]
    while x != start:
        print(x, end=" ")
        x = back_pointer[x]
    print(x)
    sys.exit()
def valid(p: TPos):
    if p[0] < 0 or p[0] > n - 1:
        return False
    if p[1] < 0 or p[1] > n - 1:
        return False
    return True
def expand_state(s, j, visited, g_function, close_list_anchor, close_list_inad, open_list, back_pointer):
    for itera in range(n_heuristic):
        open_list[itera].remove_element(s)
    # print("s", s)
    # print("j", j)
    (x, y) = s
    left = (x - 1, y)
    right = (x + 1, y)
    up = (x, y + 1)
    down = (x, y - 1)

    for neighbours in [left, right, up, down]:
        if neighbours not in blocks:
            if valid(neighbours) and neighbours not in visited:
                # print("neighbour", neighbours)
                visited.add(neighbours)
                back_pointer[neighbours] = -1
                g_function[neighbours] = float("inf")

                if valid(neighbours) and g_function[neighbours] > g_function[s] + 1:
                    g_function[neighbours] = g_function[s] + 1
                    back_pointer[neighbours] = s
                    if neighbours not in close_list_anchor:
                        open_list[0].put(neighbours, key(neighbours, 0, goal, g_function))
                        if neighbours not in close_list_inad:
                            for var in range(1, n_heuristic):
                                if key(neighbours, var, goal, g_function) <= W2 * key(neighbours, 0, goal, g_function):
                                    open_list[j].put(neighbours, key(neighbours, var, goal, g_function))
def make_common_ground():
    some_list = []
for x in range(1 , 5 ):
for y in range(1 , 6 ):
some_list.append((x, y) )
for x in range(15 , 20 ):
some_list.append((x, 17) )
for x in range(10 , 19 ):
for y in range(1 , 15 ):
some_list.append((x, y) )
# L block
for x in range(1 , 4 ):
for y in range(12 , 19 ):
some_list.append((x, y) )
for x in range(3 , 13 ):
for y in range(16 , 19 ):
some_list.append((x, y) )
return some_list
heuristics = {0: consistent_heuristic, 1: heuristic_1, 2: heuristic_2}
blocks_blk = [
(0, 1),
(1, 1),
(2, 1),
(3, 1),
(4, 1),
(5, 1),
(6, 1),
(7, 1),
(8, 1),
(9, 1),
(10, 1),
(11, 1),
(12, 1),
(13, 1),
(14, 1),
(15, 1),
(16, 1),
(17, 1),
(18, 1),
(19, 1),
]
blocks_all = make_common_ground()

blocks = blocks_blk
# hyper parameters
W1 = 1
W2 = 1
n = 20
n_heuristic = 3  # one consistent and two other inconsistent
# start and end destination
start = (0, 0)
goal = (n - 1, n - 1)

t = 1
def multi_a_star(start, goal, n_heuristic):
    g_function = {start: 0, goal: float("inf")}
    back_pointer = {start: -1, goal: -1}
    open_list = []
    visited = set()

    for i in range(n_heuristic):
        open_list.append(PriorityQueue())
        open_list[i].put(start, key(start, i, goal, g_function))

    close_list_anchor = []
    close_list_inad = []
    while open_list[0].minkey() < float("inf"):
        for i in range(1, n_heuristic):
            # print(open_list[0].minkey(), open_list[i].minkey())
            if open_list[i].minkey() <= W2 * open_list[0].minkey():
                global t
                t += 1
                if g_function[goal] <= open_list[i].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[i].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        i,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_inad.append(get_s)
            else:
                if g_function[goal] <= open_list[0].minkey():
                    if g_function[goal] < float("inf"):
                        do_something(back_pointer, goal, start)
                else:
                    get_s = open_list[0].top_show()
                    visited.add(get_s)
                    expand_state(
                        get_s,
                        0,
                        visited,
                        g_function,
                        close_list_anchor,
                        close_list_inad,
                        open_list,
                        back_pointer,
                    )
                    close_list_anchor.append(get_s)
    print("No path found to goal")
    print()
    for i in range(n - 1, -1, -1):
        for j in range(n):
            if (j, i) in blocks:
                print("#", end=" ")
            elif (j, i) in back_pointer:
                if (j, i) == (n - 1, n - 1):
                    print("*", end=" ")
                else:
                    print("-", end=" ")
            else:
                print("*", end=" ")
            if (j, i) == (n - 1, n - 1):
                print("<-- End position", end=" ")
        print()
    print("^")
    print("Start position")
    print()
    print("# is an obstacle")
    print("- is the path taken by algorithm")
if __name__ == "__main__":
multi_a_star(start, goal, n_heuristic)
| 361 |
import json
import logging
import os
import sys
from pathlib import Path
import finetune_rag
from transformers.file_utils import is_apex_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
require_ray,
require_torch_gpu,
require_torch_multi_gpu,
)
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class RagFinetuneExampleTests(TestCasePlus):
    def _create_dummy_data(self, data_dir):
        os.makedirs(data_dir, exist_ok=True)
        contents = {"source": "What is love ?", "target": "life"}
        n_lines = {"train": 12, "val": 2, "test": 2}
        for split in ["train", "test", "val"]:
            for field in ["source", "target"]:
                content = "\n".join([contents[field]] * n_lines[split])
                with open(os.path.join(data_dir, f"{split}.{field}"), "w") as f:
                    f.write(content)

    def _run_finetune(self, gpus: int, distributed_retriever: str = "pytorch"):
        tmp_dir = self.get_auto_remove_tmp_dir()
        output_dir = os.path.join(tmp_dir, "output")
        data_dir = os.path.join(tmp_dir, "data")
        self._create_dummy_data(data_dir=data_dir)
        testargs = f"""
--data_dir {data_dir} \
--output_dir {output_dir} \
--model_name_or_path facebook/rag-sequence-base \
--model_type rag_sequence \
--do_train \
--do_predict \
--n_val -1 \
--val_check_interval 1.0 \
--train_batch_size 2 \
--eval_batch_size 1 \
--max_source_length 25 \
--max_target_length 25 \
--val_max_target_length 25 \
--test_max_target_length 25 \
--label_smoothing 0.1 \
--dropout 0.1 \
--attention_dropout 0.1 \
--weight_decay 0.001 \
--adam_epsilon 1e-08 \
--max_grad_norm 0.1 \
--lr_scheduler polynomial \
--learning_rate 3e-04 \
--num_train_epochs 1 \
--warmup_steps 4 \
--gradient_accumulation_steps 1 \
--distributed-port 8787 \
--use_dummy_dataset 1 \
--distributed_retriever {distributed_retriever} \
""".split()
if gpus > 0:
testargs.append(F"""--gpus={gpus}""" )
if is_apex_available():
testargs.append('--fp16' )
else:
testargs.append('--gpus=0' )
testargs.append('--distributed_backend=ddp_cpu' )
testargs.append('--num_processes=2' )
        cmd = [sys.executable, str(Path(finetune_rag.__file__).resolve())] + testargs
        execute_subprocess_async(cmd, env=self.get_env())

        metrics_save_path = os.path.join(output_dir, "metrics.json")
        with open(metrics_save_path) as f:
            result = json.load(f)
return result
@require_torch_gpu
    def test_finetune_gpu(self):
        result = self._run_finetune(gpus=1)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
@require_torch_multi_gpu
    def test_finetune_multigpu(self):
        result = self._run_finetune(gpus=2)
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
@require_torch_gpu
@require_ray
    def test_finetune_gpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
@require_torch_multi_gpu
@require_ray
    def test_finetune_multigpu_ray_retrieval(self):
        result = self._run_finetune(gpus=1, distributed_retriever="ray")
        self.assertGreaterEqual(result["test"][0]["test_avg_em"], 0.2)
| 45 | 0 |
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
    require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass

    def load_image(_):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
'''https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png'''
)
@is_pipeline_test
@require_torch
@require_vision
class DocumentQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self, model, tokenizer, processor):
        dqa_pipeline = pipeline(
            "document-question-answering", model=model, tokenizer=tokenizer, image_processor=processor
        )

        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image),
'''question''': question,
},
{
'''image''': image,
'''question''': question,
},
{
'''image''': image,
'''question''': question,
'''word_boxes''': word_boxes,
},
]
return dqa_pipeline, examples
    def run_pipeline_test(self, dqa_pipeline, examples):
        outputs = dqa_pipeline(examples, top_k=2)
        self.assertEqual(
            outputs,
            [
                [
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                    {"score": ANY(float), "answer": ANY(str), "start": ANY(int), "end": ANY(int)},
                ]
            ]
            * 3,
        )
@require_torch
    @require_detectron2
@require_pytesseract
    def test_small_model_pt(self):
        dqa_pipeline = pipeline("document-question-answering", model="hf-internal-testing/tiny-random-layoutlmv2")
        image = INVOICE_URL
        question = "How many cats are there?"

        expected_output = [
            {"score": 0.0001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), expected_output)

        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(outputs, [])

        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image, question=question, words=words, boxes=boxes, top_k=2)
        self.assertEqual(outputs, [])
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
        outputs = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2 )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.9_9_4_4, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_0_0_9, 'answer': 'us-001', 'start': 16, 'end': 16},
],
]
* 2 ,)
@slow
@require_torch
    @require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa",
            revision="9977165",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
        outputs = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2 )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.9_9_7_4, 'answer': '1110212019', 'start': 23, 'end': 23},
{'score': 0.9_9_4_8, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 ,)
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] ,)
        outputs = dqa_pipeline({"image": image, "question": question}, top_k=2)
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] ,)
        outputs = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2 )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
]
]
* 2 ,)
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.4_2_5_1, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.0_8_1_9, 'answer': '1110212019', 'start': 23, 'end': 23},
] ,)
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self):
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa", revision="3dc6de3", add_prefix_space=True
        )
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="impira/layoutlm-document-qa",
            tokenizer=tokenizer,
            revision="3dc6de3",
            max_seq_len=50,
        )
        image = INVOICE_URL
        question = "What is the invoice number?"

        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
        outputs = dqa_pipeline(
[{'image': image, 'question': question}, {'image': image, 'question': question}] ,top_k=2 )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
]
]
* 2 ,)
        word_boxes = list(zip(*apply_tesseract(load_image(image), None, "")))

        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'score': 0.9_9_9_9, 'answer': 'us-001', 'start': 16, 'end': 16},
{'score': 0.9_9_9_8, 'answer': 'us-001', 'start': 16, 'end': 16},
] ,)
@slow
@require_torch
    def test_large_model_pt_donut(self):
        dqa_pipeline = pipeline(
            "document-question-answering",
            model="naver-clova-ix/donut-base-finetuned-docvqa",
            tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa"),
            feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa",
        )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(nested_simplify(outputs, decimals=4), [{"answer": "us-001"}])
@require_tf
@unittest.skip('Document question answering not implemented in TF' )
    def test_small_model_tf(self):
        pass
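# Hedged usage sketch for the pipeline exercised above; Hub access, PIL and
# pytesseract are assumed to be available.
if __name__ == "__main__":
    dqa = pipeline("document-question-answering", model="impira/layoutlm-document-qa")
    preds = dqa(image=INVOICE_URL, question="What is the invoice number?", top_k=2)
    print(preds)  # list of dicts with "score", "answer", "start" and "end"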
| 98 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Dict = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False):
    rename_keys = []
# fmt: off
# stem:
rename_keys.append(('''cls_token''', '''vit.embeddings.cls_token''') )
rename_keys.append(('''pos_embed''', '''vit.embeddings.position_embeddings''') )
rename_keys.append(('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight''') )
rename_keys.append(('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias''') )
# backbone
rename_keys.append(('''patch_embed.backbone.stem.conv.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.weight''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight''') )
rename_keys.append(('''patch_embed.backbone.stem.norm.bias''', '''vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias''') )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight") )
rename_keys.append((f"patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias", f"vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias") )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f"blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((f"blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append((f"blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append((f"blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append((f"blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((f"blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append((f"blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((f"blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias") )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
# fmt: on
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
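# Sanity note (illustrative only): the three slices above split timm's fused
# qkv projection of shape (3 * hidden_size, hidden_size) into equal thirds,
# which is equivalent to:
#
#   q, k, v = torch.chunk(in_proj_weight, 3, dim=0)
#   assert torch.equal(q, in_proj_weight[: config.hidden_size, :])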
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path, push_to_hub=False):
    # define default ViT hybrid configuration
    backbone_config = BitConfig(
        global_padding="same",
        layer_type="bottleneck",
        depths=(3, 4, 9),
        out_features=["stage3"],
        embedding_dynamic_padding=True,
    )
    config = ViTHybridConfig(backbone_config=backbone_config, image_size=384, num_labels=1000)
    base_model = False

    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True)
    timm_model.eval()

    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict)
    rename_keys = create_rename_keys(config, base_model)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model)

    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTHybridModel(config).eval()
    else:
        model = ViTHybridForImageClassification(config).eval()
    model.load_state_dict(state_dict)

    # create image processor
    transform = create_transform(**resolve_data_config({}, model=timm_model))
    timm_transforms = transform.transforms

    pillow_resamplings = {
        "bilinear": PILImageResampling.BILINEAR,
        "bicubic": PILImageResampling.BICUBIC,
        "nearest": PILImageResampling.NEAREST,
    }

    processor = ViTHybridImageProcessor(
        do_resize=True,
        size={"shortest_edge": timm_transforms[0].size},
        resample=pillow_resamplings[timm_transforms[0].interpolation.value],
        do_center_crop=True,
        crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]},
        do_normalize=True,
        image_mean=timm_transforms[-1].mean.tolist(),
        image_std=timm_transforms[-1].std.tolist(),
    )

    image = prepare_img()
    timm_pixel_values = transform(image).unsqueeze(0)
    pixel_values = processor(image, return_tensors="pt").pixel_values

    # verify pixel values
    assert torch.allclose(timm_pixel_values, pixel_values)

    # verify logits
    with torch.no_grad():
        outputs = model(pixel_values)
        logits = outputs.logits

    print("Predicted class:", logits.argmax(-1).item())
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values)
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3)
    else:
        timm_logits = timm_model(pixel_values)
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        print(f"Saving model {vit_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)
        print(f"Saving processor to {pytorch_dump_folder_path}")
        processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print(f"Pushing model and processor to the hub {vit_name}")
        model.push_to_hub(f"ybelkada/{vit_name}")
        processor.push_to_hub(f"ybelkada/{vit_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--vit_name''',
default='''vit_base_r50_s16_384''',
type=str,
help='''Name of the hybrid ViT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether to upload the model to the HuggingFace hub.'''
)
    args = parser.parse_args()
    convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
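    # Example invocation (illustrative: the script filename and dump path are
    # hypothetical; the flags match the parser defined above):
    #   python convert_vit_hybrid_timm_to_pytorch.py \
    #       --vit_name vit_base_r50_s16_384 \
    #       --pytorch_dump_folder_path ./vit_hybrid_base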
| 264 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
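# Note (illustrative): the "*" in the mapped keys above is a placeholder for a
# layer index. For example, the fairseq key
# "encoder.layers.3.self_attn.linear_q.weight" is routed to
# "wav2vec2_conformer.encoder.layers.3.self_attn.linear_q", with "*" replaced
# by "3" in recursively_load_weights below.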
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "wav2vec2_conformer." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "inv_freq" in name:
                        weight_type = "inv_freq"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act="swish")
    else:
        config = Wav2Vec2ConformerConfig()

    if "rope" in checkpoint_path:
        config.position_embeddings_type = "rotary"

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, not is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
    )
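    # Example invocation (illustrative: the script filename and all paths are
    # hypothetical; the flags match the parser defined above):
    #   python convert_wav2vec2_conformer_checkpoint.py \
    #       --checkpoint_path ./checkpoint_best.pt \
    #       --pytorch_dump_folder_path ./wav2vec2-conformer-ctc \
    #       --dict_path ./dict.ltr.txt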
| 362 |
"""simple docstring"""
import torch
from torch import nn
class ProjectedAdaptiveLogSoftmax(nn.Module):
    def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, keep_order=False):
        super().__init__()
        self.n_token = n_token
        self.d_embed = d_embed
        self.d_proj = d_proj

        self.cutoffs = cutoffs + [n_token]
        self.cutoff_ends = [0] + self.cutoffs
        self.div_val = div_val

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        if self.n_clusters > 0:
            self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
            self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.out_layers = nn.ModuleList()
        self.out_projs = nn.ParameterList()

        if div_val == 1:
            for i in range(len(self.cutoffs)):
                if d_proj != d_embed:
                    self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
                else:
                    self.out_projs.append(None)
                self.out_layers.append(nn.Linear(d_embed, n_token))
        else:
            for i in range(len(self.cutoffs)):
                l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                d_emb_i = d_embed // (div_val**i)
                self.out_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
                self.out_layers.append(nn.Linear(d_emb_i, r_idx - l_idx))

        self.keep_order = keep_order

    def _compute_logit(self, hidden, weight, bias, proj):
        if proj is None:
            logit = nn.functional.linear(hidden, weight, bias=bias)
        else:
            # if CUDA_MAJOR <= 9 and CUDA_MINOR <= 1:
            proj_hid = nn.functional.linear(hidden, proj.t().contiguous())
            logit = nn.functional.linear(proj_hid, weight, bias=bias)
            # else:
            #     logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
            #     if bias is not None:
            #         logit = logit + bias
        return logit
    def forward(self, hidden, labels=None, keep_order=False):
        if labels is not None:
            # Shift so that tokens < n predict n
            hidden = hidden[..., :-1, :].contiguous()
            labels = labels[..., 1:].contiguous()
            hidden = hidden.view(-1, hidden.size(-1))
            labels = labels.view(-1)
            if hidden.size(0) != labels.size(0):
                raise RuntimeError("Input and labels should have the same size in the batch dimension.")
        else:
            hidden = hidden.view(-1, hidden.size(-1))

        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            if labels is not None:
                mask = labels != -100
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)
                out[mask] = (
                    -nn.functional.log_softmax(logit, dim=-1)[mask].gather(1, labels[mask].unsqueeze(1)).squeeze(1)
                )
            else:
                out = nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            if labels is None:
                out = hidden.new_empty((head_logit.size(0), self.n_token))
            else:
                out = torch.zeros_like(labels, dtype=hidden.dtype, device=hidden.device)

            offset = 0
            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]

                if labels is not None:
                    mask_i = (labels >= l_idx) & (labels < r_idx)
                    indices_i = mask_i.nonzero().squeeze()
                    if indices_i.numel() == 0:
                        continue
                    target_i = labels.index_select(0, indices_i) - l_idx
                    head_logprob_i = head_logprob.index_select(0, indices_i)
                    hidden_i = hidden.index_select(0, indices_i)
                else:
                    hidden_i = hidden

                if i == 0:
                    if labels is not None:
                        logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
                    else:
                        out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    cluster_prob_idx = self.cutoffs[0] + i - 1  # No probability for the head cluster
                    if labels is not None:
                        logprob_i = head_logprob_i[:, cluster_prob_idx] + tail_logprob_i.gather(
                            1, target_i[:, None]
                        ).squeeze(1)
                    else:
                        logprob_i = head_logprob[:, cluster_prob_idx, None] + tail_logprob_i
                        out[:, l_idx:r_idx] = logprob_i

                if labels is not None:
                    if (hasattr(self, "keep_order") and self.keep_order) or keep_order:
                        out.index_copy_(0, indices_i, -logprob_i)
                    else:
                        out[offset : offset + logprob_i.size(0)].copy_(-logprob_i)
                    offset += logprob_i.size(0)

        return out
    def log_prob(self, hidden):
        if self.n_clusters == 0:
            logit = self._compute_logit(hidden, self.out_layers[0].weight, self.out_layers[0].bias, self.out_projs[0])
            return nn.functional.log_softmax(logit, dim=-1)
        else:
            # construct weights and biases
            weights, biases = [], []
            for i in range(len(self.cutoffs)):
                if self.div_val == 1:
                    l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
                    weight_i = self.out_layers[0].weight[l_idx:r_idx]
                    bias_i = self.out_layers[0].bias[l_idx:r_idx]
                else:
                    weight_i = self.out_layers[i].weight
                    bias_i = self.out_layers[i].bias
                if i == 0:
                    weight_i = torch.cat([weight_i, self.cluster_weight], dim=0)
                    bias_i = torch.cat([bias_i, self.cluster_bias], dim=0)
                weights.append(weight_i)
                biases.append(bias_i)

            head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0]
            head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
            out = hidden.new_empty((head_logit.size(0), self.n_token))
            head_logprob = nn.functional.log_softmax(head_logit, dim=1)

            cutoff_values = [0] + self.cutoffs
            for i in range(len(cutoff_values) - 1):
                start_idx, stop_idx = cutoff_values[i], cutoff_values[i + 1]
                if i == 0:
                    out[:, : self.cutoffs[0]] = head_logprob[:, : self.cutoffs[0]]
                else:
                    weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i]
                    tail_logit_i = self._compute_logit(hidden, weight_i, bias_i, proj_i)
                    tail_logprob_i = nn.functional.log_softmax(tail_logit_i, dim=1)
                    logprob_i = head_logprob[:, -i] + tail_logprob_i
                    out[:, start_idx:stop_idx] = logprob_i
            return out
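# A minimal usage sketch (hypothetical sizes): an adaptive softmax over a
# 10_000-token vocabulary with cutoffs [1_000, 4_000]. When labels are given,
# forward() shifts them by one position and returns per-token NLL values:
#
#   softmax = ProjectedAdaptiveLogSoftmax(n_token=10_000, d_embed=512, d_proj=512, cutoffs=[1_000, 4_000], div_val=2)
#   hidden = torch.randn(2, 8, 512)
#   labels = torch.randint(0, 10_000, (2, 8))
#   nll = softmax(hidden, labels)  # shape: (2 * 7,) after the shift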
| 132 | 0 |
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
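# Illustrative values (doctest style):
#   median_of_two_arrays([1, 2], [3])         -> 2
#   median_of_two_arrays([0, -1.1], [2.5, 1]) -> 0.5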
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_1 = [float(x) for x in input('Enter the elements of first array: ').split()]
    array_2 = [float(x) for x in input('Enter the elements of second array: ').split()]
    print(f'''The median of two arrays is: {median_of_two_arrays(array_1, array_2)}''')
| 192 |
'''simple docstring'''
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seq2seq_trainer import Seq2SeqTrainer
from seq2seq_training_args import Seq2SeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
    AutoModelForSeq2SeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
    Seq2SeqDataCollator,
    Seq2SeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_encoder: bool = field(default=False, metadata={"help": "Whether to freeze the encoder."})
    freeze_embeds: bool = field(default=False, metadata={"help": "Whether to freeze the embeddings."})


@dataclass
class DataTrainingArguments:
    data_dir: str = field(
        metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."}
    )
    task: Optional[str] = field(
        default="summarization",
        metadata={"help": "Task name, summarization (or summarization_{dataset} for pegasus) or translation"},
    )
    max_source_length: Optional[int] = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    max_target_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total sequence length for target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    val_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for validation target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded. "
                "This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
                "during ``evaluate`` and ``predict``."
            )
        },
    )
    test_max_target_length: Optional[int] = field(
        default=142,
        metadata={
            "help": (
                "The maximum total sequence length for test target text after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    n_train: Optional[int] = field(default=-1, metadata={"help": "# training examples. -1 means use all."})
    n_val: Optional[int] = field(default=-1, metadata={"help": "# validation examples. -1 means use all."})
    n_test: Optional[int] = field(default=-1, metadata={"help": "# test examples. -1 means use all."})
    src_lang: Optional[str] = field(default=None, metadata={"help": "Source language id for translation."})
    tgt_lang: Optional[str] = field(default=None, metadata={"help": "Target language id for translation."})
    eval_beams: Optional[int] = field(default=None, metadata={"help": "# num_beams to use for evaluation."})
    ignore_pad_token_for_loss: bool = field(
        default=True,
        metadata={"help": "If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined."},
    )
def handle_metrics(split, metrics, output_dir):
    """Logs and saves metrics for one of the train/val/test splits."""
    logger.info(f"***** {split} metrics *****")
    for key in sorted(metrics.keys()):
        logger.info(f"  {key} = {metrics[key]}")
    save_json(metrics, os.path.join(output_dir, f"{split}_results.json"))


def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))

    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    check_output_dir(training_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN,
    )
    logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED),
        training_args.fp16,
    )
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed
    set_seed(training_args.seed)

    # Load pretrained model and tokenizer
    #
    # Distributed training:
    # The .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )

    extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
    for p in extra_model_params:
        if getattr(training_args, p, None):
            assert hasattr(config, p), f"({config.__class__.__name__}) doesn't have a `{p}` attribute"
            setattr(config, p, getattr(training_args, p))

    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_args.model_name_or_path,
        from_tf=".ckpt" in model_args.model_name_or_path,
        config=config,
        cache_dir=model_args.cache_dir,
    )
    # use task specific params
    use_task_specific_params(model, data_args.task)

    # set num_beams for evaluation
    if data_args.eval_beams is None:
        data_args.eval_beams = model.config.num_beams

    # set decoder_start_token_id for MBart
    if model.config.decoder_start_token_id is None and isinstance(tokenizer, (MBartTokenizer, MBartTokenizerFast)):
        assert (
            data_args.tgt_lang is not None and data_args.src_lang is not None
        ), "mBart requires --tgt_lang and --src_lang"
        if isinstance(tokenizer, MBartTokenizer):
            model.config.decoder_start_token_id = tokenizer.lang_code_to_id[data_args.tgt_lang]
        else:
            model.config.decoder_start_token_id = tokenizer.convert_tokens_to_ids(data_args.tgt_lang)

    if model_args.freeze_embeds:
        freeze_embeds(model)
    if model_args.freeze_encoder:
        freeze_params(model.get_encoder())
        assert_all_frozen(model.get_encoder())

    dataset_class = Seq2SeqDataset
    # Get datasets
    train_dataset = (
        dataset_class(
            tokenizer, type_path="train", data_dir=data_args.data_dir, n_obs=data_args.n_train,
            max_target_length=data_args.max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        dataset_class(
            tokenizer, type_path="val", data_dir=data_args.data_dir, n_obs=data_args.n_val,
            max_target_length=data_args.val_max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
        else None
    )
    test_dataset = (
        dataset_class(
            tokenizer, type_path="test", data_dir=data_args.data_dir, n_obs=data_args.n_test,
            max_target_length=data_args.test_max_target_length, max_source_length=data_args.max_source_length,
            prefix=model.config.prefix or "",
        )
        if training_args.do_predict
        else None
    )

    # Initialize our Trainer
    compute_metrics_fn = (
        build_compute_metrics_fn(data_args.task, tokenizer) if training_args.predict_with_generate else None
    )
    trainer = Seq2SeqTrainer(
        model=model,
        args=training_args,
        data_args=data_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        data_collator=Seq2SeqDataCollator(tokenizer, data_args, model.config.decoder_start_token_id, training_args.tpu_num_cores),
        compute_metrics=compute_metrics_fn,
        tokenizer=tokenizer,
    )
    all_metrics = {}
# Training
if training_args.do_train:
logger.info("*** Train ***" )
        train_result = trainer.train(
            model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path) else None
        )
        metrics = train_result.metrics
        metrics["train_n_objs"] = data_args.n_train

        trainer.save_model()  # this also saves the tokenizer

        if trainer.is_world_process_zero():
            handle_metrics("train", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            # Need to save the state, since Trainer.save_model saves only the tokenizer with the model
            trainer.state.save_to_json(os.path.join(training_args.output_dir, "trainer_state.json"))

            # For convenience, we also re-save the tokenizer to the same directory,
            # so that you can share your model easily on huggingface.co/models =)
            tokenizer.save_pretrained(training_args.output_dir)

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        metrics = trainer.evaluate(metric_key_prefix="val")
        metrics["val_n_objs"] = data_args.n_val
        metrics["val_loss"] = round(metrics["val_loss"], 4)

        if trainer.is_world_process_zero():
            handle_metrics("val", metrics, training_args.output_dir)
            all_metrics.update(metrics)

    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_output = trainer.predict(test_dataset=test_dataset, metric_key_prefix="test")
        metrics = test_output.metrics
        metrics["test_n_objs"] = data_args.n_test

        if trainer.is_world_process_zero():
            metrics["test_loss"] = round(metrics["test_loss"], 4)
            handle_metrics("test", metrics, training_args.output_dir)
            all_metrics.update(metrics)

            if training_args.predict_with_generate:
                test_preds = tokenizer.batch_decode(
                    test_output.predictions, skip_special_tokens=True, clean_up_tokenization_spaces=True
                )
                test_preds = lmap(str.strip, test_preds)
                write_txt_file(test_preds, os.path.join(training_args.output_dir, "test_generations.txt"))

    if trainer.is_world_process_zero():
        save_json(all_metrics, os.path.join(training_args.output_dir, "all_results.json"))

    return all_metrics
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
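    # Example invocation (illustrative: the script filename, model checkpoint and
    # paths are hypothetical; data_dir must contain the .source/.target files
    # expected by Seq2SeqDataset):
    #   python finetune_trainer.py --model_name_or_path sshleifer/distilbart-xsum-12-3 \
    #       --data_dir ./xsum --output_dir ./output --do_train --do_eval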
| 55 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        # copy the template and swap in the dataset's own ClassLabel
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
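# A minimal usage sketch (hypothetical column names): aligning a dataset whose
# text lives in "review" and labels in "sentiment" onto this template:
#
#   features = Features({"review": Value("string"), "sentiment": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification(text_column="review", label_column="sentiment")
#   task = task.align_with_features(features)  # copies the ClassLabel into the label schema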
| 362 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """Stack implemented on top of a singly linked list."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
if __name__ == "__main__":
from doctest import testmod
testmod()
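    # Illustrative usage:
    #   stack = LinkedStack[int]()
    #   stack.push(1)
    #   stack.push(2)
    #   assert stack.peek() == 2 and stack.pop() == 2 and len(stack) == 1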
| 55 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 182 |
'''simple docstring'''
import math
def malus_law(initial_intensity: float, angle: float) -> float:
    # handling of negative values of initial intensity
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative")
    # handling of values out of allowed range
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees")
    return initial_intensity * (math.cos(math.radians(angle)) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name='malus_law')
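    # Illustrative values: at 0 degrees the full intensity passes, while at
    # 60 degrees cos^2(60) = 0.25, so only a quarter does:
    #   malus_law(100.0, 0)  -> 100.0
    #   malus_law(100.0, 60) -> ~25.0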
| 321 | 0 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor
global_rng = random.Random()
def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])

    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])

    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])
        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 6, 24))

    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]

    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
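    # Note: the hard-coded values above act as a regression check. They are the
    # log-mel features of the first frame of the first LibriSpeech dummy sample
    # and must match the extractor's normalization to within 1e-4.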
| 357 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from ...utils.dataclasses import (
ComputeEnvironment,
DistributedType,
DynamoBackend,
PrecisionType,
SageMakerDistributedType,
)
from ..menu import BulletMenu
DYNAMO_BACKENDS = [
"EAGER",
"AOT_EAGER",
"INDUCTOR",
"NVFUSER",
"AOT_NVFUSER",
"AOT_CUDAGRAPHS",
"OFI",
"FX2TRT",
"ONNXRT",
"IPEX",
]
def lowercase ( _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Dict=None , _SCREAMING_SNAKE_CASE : List[Any]=None , _SCREAMING_SNAKE_CASE : Tuple=None ):
'''simple docstring'''
_UpperCAmelCase = True
while ask_again:
_UpperCAmelCase = input(_SCREAMING_SNAKE_CASE )
try:
if default is not None and len(_SCREAMING_SNAKE_CASE ) == 0:
return default
return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result
except Exception:
if error_message is not None:
print(_SCREAMING_SNAKE_CASE )
def lowercase ( _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int]=[] , _SCREAMING_SNAKE_CASE : Tuple=None , _SCREAMING_SNAKE_CASE : Dict=0 ):
'''simple docstring'''
_UpperCAmelCase = BulletMenu(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
_UpperCAmelCase = menu.run(default_choice=_SCREAMING_SNAKE_CASE )
return convert_value(_SCREAMING_SNAKE_CASE ) if convert_value is not None else result
def _convert_compute_environment(value):
    value = int(value)
    return ComputeEnvironment(["LOCAL_MACHINE", "AMAZON_SAGEMAKER"][value])


def _convert_distributed_mode(value):
    value = int(value)
    return DistributedType(["NO", "MULTI_CPU", "MULTI_XPU", "MULTI_GPU", "MULTI_NPU", "TPU"][value])


def _convert_dynamo_backend(value):
    value = int(value)
    return DynamoBackend(DYNAMO_BACKENDS[value]).value


def _convert_mixed_precision(value):
    value = int(value)
    return PrecisionType(["no", "fp16", "bf16", "fp8"][value])


def _convert_sagemaker_distributed_mode(value):
    value = int(value)
    return SageMakerDistributedType(["NO", "DATA_PARALLEL", "MODEL_PARALLEL"][value])


def _convert_yes_no_to_bool(value):
    return {"yes": True, "no": False}[value.lower()]
class SubcommandHelpFormatter(argparse.RawDescriptionHelpFormatter):
    """
    A custom formatter that removes the usage line from the help message for subcommands.
    """

    def _format_usage(self, usage, actions, groups, prefix):
        usage = super()._format_usage(usage, actions, groups, prefix)
        usage = usage.replace("<command> [<args>] ", "")
        return usage
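# Hedged demo of the converters above: an index-to-enum mapping only, so no
# interactive prompt is needed. The expected values in the comments are
# assumptions based on accelerate's enum definitions.
if __name__ == "__main__":
    assert _convert_yes_no_to_bool("Yes") is True
    print(_convert_mixed_precision("1"))      # expected: the fp16 precision type
    print(_convert_compute_environment("0"))  # expected: LOCAL_MACHINE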
| 326 | 0 |
'''simple docstring'''
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time of the process wait in ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence

    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times

    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times

    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times

    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished: deque[Process] = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to the finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished: deque[Process] = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to the finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one are scheduled round robin
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(self.ready_queue, self.time_slices[i])
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])

    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)

    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:\t{mlfq.calculate_sequence_of_finish_queue()}")
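# Hedged expected output for the workload above (all arrivals at t=0, time
# slices [17, 25]), assuming the reconstructed scheduler matches the upstream
# multi-level feedback queue implementation:
#   waiting time:           [83, 17, 94, 101]
#   completion time:        [136, 34, 162, 125]
#   turnaround time:        [136, 34, 162, 125]
#   sequence of finished processes: ['P2', 'P4', 'P1', 'P3']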
| 271 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCAmelCase = logging.getLogger()
@unittest.skip('''Temporarily disable the doc tests.''' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            # an n_identifier can be a string or a list of strings
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append("__init__.py")
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print("Testing", file)

            if only_modules:
                module_identifier = file.split(".")[0]
                try:
                    module = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"{module_identifier} is not a module.")
            else:
                result = doctest.testfile(str(Path("..") / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_files(self):
        directory = Path("src/transformers")
        identifier = "modeling"
        ignore_files = [
            "modeling_ctrl.py",
            "modeling_tf_ctrl.py",
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_files(self):
        directory = Path("src/transformers")
        identifier = "tokenization"
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_files(self):
        directory = Path("src/transformers")
        identifier = "configuration"
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_files(self):
        directory = Path("src/transformers")
        n_identifier = ["configuration", "modeling", "tokenization"]
        self.analyze_directory(directory, n_identifier=n_identifier)

    def test_doc_files(self):
        directory = Path("docs/source")
        ignore_files = ["favicon.ico"]
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
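# Hedged standalone sketch of the `only_modules` branch above: build a doctest
# suite from one module's docstrings and run it; the module picked here is
# illustrative.
if __name__ == "__main__":
    suite = doctest.DocTestSuite(transformers.tokenization_utils)
    unittest.TextTestRunner().run(suite)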
| 271 | 1 |
"""simple docstring"""
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class PipelineDataset(Dataset):
    def __init__(self, dataset, process, params):
        self.dataset = dataset
        self.process = process
        self.params = params

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        item = self.dataset[i]
        processed = self.process(item, **self.params)
        return processed
class PipelineIterator(IterableDataset):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        self.loader = loader
        self.infer = infer
        self.params = params
        if loader_batch_size == 1:
            # Let's spare some time by deactivating altogether
            loader_batch_size = None
        self.loader_batch_size = loader_batch_size

        # Internal bookkeeping
        self._loader_batch_index = None
        self._loader_batch_data = None

    def __len__(self):
        return len(self.loader)

    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def loader_batch_item(self):
        # Return the item located at `_loader_batch_index` within the current batch.
        if isinstance(self._loader_batch_data, torch.Tensor):
            # Batch data is simple tensor, just fetch the slice
            result = self._loader_batch_data[self._loader_batch_index]
        else:
            # Batch data is assumed to be BaseModelOutput (or dict)
            loader_batched = {}
            for k, element in self._loader_batch_data.items():
                if isinstance(element, ModelOutput):
                    # Convert ModelOutput to tuple first
                    element = element.to_tuple()
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(element, tuple):
                    # Those are stored as lists of tensors so need specific unbatching.
                    if isinstance(element[0], torch.Tensor):
                        loader_batched[k] = tuple(el[self._loader_batch_index].unsqueeze(0) for el in element)
                    elif isinstance(element[0], np.ndarray):
                        loader_batched[k] = tuple(np.expand_dims(el[self._loader_batch_index], 0) for el in element)
                    continue
                if element is None:
                    # This can happen for optional data that get passed around
                    loader_batched[k] = None
                elif isinstance(element[self._loader_batch_index], torch.Tensor):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = element[self._loader_batch_index].unsqueeze(0)
                elif isinstance(element[self._loader_batch_index], np.ndarray):
                    # Take correct batch data, but make it look like batch_size=1
                    # For compatibility with other methods within transformers
                    loader_batched[k] = np.expand_dims(element[self._loader_batch_index], 0)
                else:
                    # This is typically a list, so no need to `unsqueeze`.
                    loader_batched[k] = element[self._loader_batch_index]
            # Recreate the element by reusing the original class to make it look
            # like batch_size=1
            result = self._loader_batch_data.__class__(loader_batched)
        self._loader_batch_index += 1
        return result

    def __next__(self):
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            # We are currently unrolling a batch so we just need to return
            # the current item within a batch
            return self.loader_batch_item()

        # We're out of items within a batch
        item = next(self.iterator)
        processed = self.infer(item, **self.params)
        # We now have a batch of "inferred things".
        if self.loader_batch_size is not None:
            # Try to infer the size of the batch
            if isinstance(processed, torch.Tensor):
                first_tensor = processed
            else:
                key = list(processed.keys())[0]
                first_tensor = processed[key]
            if isinstance(first_tensor, list):
                observed_batch_size = len(first_tensor)
            else:
                observed_batch_size = first_tensor.shape[0]
            if 0 < observed_batch_size < self.loader_batch_size:
                # could be last batch so we can't unroll as many
                # elements.
                self.loader_batch_size = observed_batch_size
            # Setting internal index to unwrap the batch
            self._loader_batch_data = processed
            self._loader_batch_index = 0
            return self.loader_batch_item()
        else:
            # We're not unrolling batches
            return processed
class PipelineChunkIterator(PipelineIterator):
    def __init__(self, loader, infer, params, loader_batch_size=None):
        super().__init__(loader, infer, params)

    def __iter__(self):
        self.iterator = iter(self.loader)
        self.subiterator = None
        return self

    def __next__(self):
        if self.subiterator is None:
            # Subiterator is None, meaning we haven't started yet
            self.subiterator = self.infer(next(self.iterator), **self.params)
        try:
            # Try to return next item
            processed = next(self.subiterator)
        except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item
            # ChunkIterator will keep feeding until ALL elements of iterator
            # all have created their subiterator and have been iterating against.
            #
            # Another way to look at it, is we're basically flattening lists of lists
            # into a single list, but with generators
            self.subiterator = self.infer(next(self.iterator), **self.params)
            processed = next(self.subiterator)
        return processed
class PipelinePackIterator(PipelineIterator):
    def __iter__(self):
        self.iterator = iter(self.loader)
        return self

    def __next__(self):
        # Pack items until `is_last` is hit, regrouping what the chunk iterator
        # flattened so downstream steps see whole-`process` boundaries again.
        is_last = False
        accumulator = []
        if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
            while self._loader_batch_index < self.loader_batch_size:
                item = self.loader_batch_item()
                is_last = item.pop("is_last")
                accumulator.append(item)
                if is_last:
                    return accumulator

        while not is_last:
            processed = self.infer(next(self.iterator), **self.params)
            if self.loader_batch_size is not None:
                if isinstance(processed, torch.Tensor):
                    first_tensor = processed
                else:
                    key = list(processed.keys())[0]
                    first_tensor = processed[key]
                if isinstance(first_tensor, list):
                    observed_batch_size = len(first_tensor)
                else:
                    observed_batch_size = first_tensor.shape[0]
                if 0 < observed_batch_size < self.loader_batch_size:
                    # could be last batch so we can't unroll as many
                    # elements.
                    self.loader_batch_size = observed_batch_size
                self._loader_batch_data = processed
                self._loader_batch_index = 0
                while self._loader_batch_index < self.loader_batch_size:
                    item = self.loader_batch_item()
                    is_last = item.pop("is_last")
                    accumulator.append(item)
                    if is_last:
                        return accumulator
            else:
                item = processed
                is_last = item.pop("is_last")
                accumulator.append(item)
        return accumulator
class KeyDataset(Dataset):
    def __init__(self, dataset, key):
        self.dataset = dataset
        self.key = key

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return self.dataset[i][self.key]


class KeyPairDataset(Dataset):
    def __init__(self, dataset, key1, key2):
        self.dataset = dataset
        self.key1 = key1
        self.key2 = key2

    def __len__(self):
        return len(self.dataset)

    def __getitem__(self, i):
        return {"text": self.dataset[i][self.key1], "text_pair": self.dataset[i][self.key2]}
| 166 |
"""simple docstring"""
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137


def lamberts_ellipsoidal_distance(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lat1 = atan((1 - flattening) * tan(radians(lat1)))
    b_lat2 = atan((1 - flattening) * tan(radians(lat2)))

    # Compute central angle between two points
    # using haversine theta. sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lat1, lon1, lat2, lon2) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lat1 + b_lat2) / 2
    q_value = (b_lat2 - b_lat1) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2Pcos^2Q / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2Psin^2Q / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
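# Hedged worked example (coordinates approximate): downtown San Francisco
# (37.774856, -122.424227) to Yosemite Valley (37.864742, -119.537521) should
# come out on the order of 254 km, e.g.:
#     print(f"{lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521):0,.0f} meters")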
| 166 | 1 |
'''simple docstring'''
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Returns the count of positive integers that are both an n-digit number and
    an nth power (Project Euler problem 63).
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(
        1 for power in powers for base in bases if len(str(base**power)) == power
    )


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
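# Hedged worked check: 16807 == 7**5 is a five-digit fifth power, while 10**n
# always has n + 1 digits, so no base of 10 or more can qualify and max_base=10
# suffices. With the defaults this should print solution(10, 22) = 49.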
| 80 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from tokenizers.pre_tokenizers import BertPreTokenizer, PreTokenizer
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roformer import RoFormerTokenizer
from .tokenization_utils import JiebaPreTokenizer
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'junnyu/roformer_chinese_small': 'https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/vocab.txt',
'junnyu/roformer_chinese_base': 'https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/vocab.txt',
'junnyu/roformer_chinese_char_small': (
'https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/vocab.txt'
),
'junnyu/roformer_chinese_char_base': (
'https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/vocab.txt'
),
'junnyu/roformer_small_discriminator': (
'https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/vocab.txt'
),
'junnyu/roformer_small_generator': (
'https://huggingface.co/junnyu/roformer_small_generator/resolve/main/vocab.txt'
),
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'junnyu/roformer_chinese_small': 1536,
    'junnyu/roformer_chinese_base': 1536,
    'junnyu/roformer_chinese_char_small': 512,
    'junnyu/roformer_chinese_char_base': 512,
    'junnyu/roformer_small_discriminator': 128,
    'junnyu/roformer_small_generator': 128,
}
PRETRAINED_INIT_CONFIGURATION = {
'junnyu/roformer_chinese_small': {'do_lower_case': True},
'junnyu/roformer_chinese_base': {'do_lower_case': True},
'junnyu/roformer_chinese_char_small': {'do_lower_case': True},
'junnyu/roformer_chinese_char_base': {'do_lower_case': True},
'junnyu/roformer_small_discriminator': {'do_lower_case': True},
'junnyu/roformer_small_generator': {'do_lower_case': True},
}
class RoFormerTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RoFormerTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def __getstate__(self):
        state = self.__dict__.copy()
        state["_tokenizer"].pre_tokenizer = BertPreTokenizer()
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        vocab = self.__dict__["_tokenizer"].get_vocab()
        self.__dict__["_tokenizer"].pre_tokenizer = PreTokenizer.custom(JiebaPreTokenizer(vocab))

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def save_pretrained(self, save_directory, legacy_format=None, filename_prefix=None, push_to_hub=False, **kwargs):
        self.backend_tokenizer.pre_tokenizer = BertPreTokenizer()
        return super().save_pretrained(save_directory, legacy_format, filename_prefix, push_to_hub, **kwargs)
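# Hedged usage sketch: load the fast tokenizer from one of the checkpoints in
# the map above; the sample sentence and its expected segmentation follow the
# upstream docstring example and are assumptions here.
if __name__ == "__main__":
    tokenizer = RoFormerTokenizerFast.from_pretrained("junnyu/roformer_chinese_base")
    print(tokenizer.tokenize("今天天气非常好。"))  # expected roughly: ['今', '天', '天', '气', '非常', '好', '。']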
| 80 | 1 |
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
logger = logging.get_logger(__name__)


class MultiControlNetModel(ModelMixin):
    def __init__(self, controlnets):
        super().__init__()
        self.nets = nn.ModuleList(controlnets)

    def forward(
        self,
        sample,
        timestep,
        encoder_hidden_states,
        controlnet_cond,
        conditioning_scale,
        class_labels=None,
        timestep_cond=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        guess_mode=False,
        return_dict=True,
    ):
        for i, (image, scale, controlnet) in enumerate(zip(controlnet_cond, conditioning_scale, self.nets)):
            down_samples, mid_sample = controlnet(
                sample,
                timestep,
                encoder_hidden_states,
                image,
                scale,
                class_labels,
                timestep_cond,
                attention_mask,
                cross_attention_kwargs,
                guess_mode,
                return_dict,
            )
            # merge samples
            if i == 0:
                down_block_res_samples, mid_block_res_sample = down_samples, mid_sample
            else:
                down_block_res_samples = [
                    samples_prev + samples_curr
                    for samples_prev, samples_curr in zip(down_block_res_samples, down_samples)
                ]
                mid_block_res_sample += mid_sample
        return down_block_res_samples, mid_block_res_sample

    def save_pretrained(self, save_directory, is_main_process=True, save_function=None, safe_serialization=False, variant=None):
        idx = 0
        model_path_to_save = save_directory
        for controlnet in self.nets:
            controlnet.save_pretrained(
                model_path_to_save,
                is_main_process=is_main_process,
                save_function=save_function,
                safe_serialization=safe_serialization,
                variant=variant,
            )
            idx += 1
            model_path_to_save = model_path_to_save + f"_{idx}"

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **kwargs):
        idx = 0
        controlnets = []
        # load controlnet and append to list until no controlnet directory exists anymore
        # first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
        # second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
        model_path_to_load = pretrained_model_path
        while os.path.isdir(model_path_to_load):
            controlnet = ControlNetModel.from_pretrained(model_path_to_load, **kwargs)
            controlnets.append(controlnet)
            idx += 1
            model_path_to_load = pretrained_model_path + f"_{idx}"
        logger.info(f"{len(controlnets)} controlnets loaded from {pretrained_model_path}.")
        if len(controlnets) == 0:
            raise ValueError(
                f"No ControlNets found under {os.path.dirname(pretrained_model_path)}. "
                f"Expected at least {pretrained_model_path + '_0'}."
            )
        return cls(controlnets)
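# Hedged usage sketch: compose two ControlNets so both conditioning images
# steer the same denoising pass; the checkpoint names are illustrative.
if __name__ == "__main__":
    canny = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
    pose = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-openpose")
    multi = MultiControlNetModel([canny, pose])
    multi.save_pretrained("./mydirectory/controlnet")  # writes controlnet, then controlnet_1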
| 355 |
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
logger = logging.get_logger(__name__)


class SegformerFeatureExtractor(SegformerImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of"
            " Transformers. Please use SegformerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
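# Hedged sketch: constructing the shim forwards to SegformerImageProcessor and
# should emit a FutureWarning, which can be checked like this:
if __name__ == "__main__":
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        SegformerFeatureExtractor()
        assert any(issubclass(w.category, FutureWarning) for w in caught)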
| 178 | 0 |