| code (string, lengths 87–55.2k) | code_codestyle (int64, 0–349) | style_context (string, lengths 135–49.1k) | style_context_codestyle (int64, 0–349) | label (int64, 0–1) |
|---|---|---|---|---|
'''simple docstring'''
import argparse
import os
import re
import packaging.version
lowerCAmelCase_ : Any = 'examples/'
lowerCAmelCase_ : List[str] = {
'examples': (re.compile(R'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(R'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(R'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), R'\1version="VERSION",'),
'doc': (re.compile(R'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
lowerCAmelCase_ : Union[str, Any] = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
lowerCAmelCase_ : List[str] = 'README.md'
def _lowerCamelCase ( lowercase : str , lowercase : Union[str, Any] , lowercase : Dict ) -> int:
with open(lowercase , "r" , encoding="utf-8" , newline="\n" ) as f:
_a = f.read()
_a , _a = REPLACE_PATTERNS[pattern]
_a = replace.replace("VERSION" , lowercase )
_a = re_pattern.sub(lowercase , lowercase )
with open(lowercase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.write(lowercase )
def _lowerCamelCase ( lowercase : Optional[int] ) -> Tuple:
for folder, directories, fnames in os.walk(lowercase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("research_projects" )
if "legacy" in directories:
directories.remove("legacy" )
for fname in fnames:
if fname.endswith(".py" ):
update_version_in_file(os.path.join(lowercase , lowercase ) , lowercase , pattern="examples" )
def _lowerCamelCase ( lowercase : List[str] , lowercase : Any=False ) -> List[str]:
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(lowercase , lowercase , lowercase )
if not patch:
update_version_in_examples(lowercase )
def _lowerCamelCase ( ) -> Union[str, Any]:
_a = "🤗 Transformers currently provides the following architectures"
_a = "1. Want to contribute a new model?"
with open(lowercase , "r" , encoding="utf-8" , newline="\n" ) as f:
_a = f.readlines()
# Find the start of the list.
_a = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_a = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("1." ):
_a = lines[index].replace(
"https://huggingface.co/docs/diffusers/main/model_doc" , "https://huggingface.co/docs/diffusers/model_doc" , )
index += 1
with open(lowercase , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lowercase )
def _lowerCamelCase ( ) -> Tuple:
with open(REPLACE_FILES["init"] , "r" ) as f:
_a = f.read()
_a = REPLACE_PATTERNS["init"][0].search(lowercase ).groups()[0]
return packaging.version.parse(lowercase )
def _lowerCamelCase ( lowercase : str=False ) -> int:
_a = get_version()
if patch and default_version.is_devrelease:
raise ValueError("Can't create a patch version from the dev branch, checkout a released version!" )
if default_version.is_devrelease:
_a = default_version.base_version
elif patch:
_a = F'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
_a = F'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
_a = input(F'Which version are you releasing? [{default_version}]' )
if len(lowercase ) == 0:
_a = default_version
print(F'Updating version to {version}.' )
global_version_update(lowercase , patch=lowercase )
def _lowerCamelCase ( ) -> List[Any]:
_a = get_version()
_a = F'{current_version.major}.{current_version.minor + 1}.0.dev0'
_a = current_version.base_version
# Check with the user we got that right.
_a = input(F'Which version are we developing now? [{dev_version}]' )
if len(lowercase ) == 0:
_a = dev_version
print(F'Updating version to {version}.' )
global_version_update(lowercase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCAmelCase_ : Union[str, Any] = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
lowerCAmelCase_ : Any = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
| 63 |
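This first `code` sample is a release-version bumping script after the style transform replaced every assignment target with `_a`, which breaks name resolution (`re_pattern`, `replace`, `start_index`, and friends are read but never bound, and several functions reuse `lowercase` for every parameter). A minimal deobfuscated sketch of its core helper, with names restored by assumption from the call sites:

```python
import re

# One representative entry from the snippet's REPLACE_PATTERNS table.
REPLACE_PATTERNS = {
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
}


def update_version_in_file(fname: str, version: str, pattern: str) -> None:
    """Rewrite the version string in ``fname`` using the regex registered under ``pattern``."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(re_pattern.sub(replace, code))
```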
'''simple docstring'''
def is_palindrome_number(num: int) -> bool:
    """Return True if ``num`` reads the same forwards and backwards.

    >>> is_palindrome_number(121)
    True
    >>> is_palindrome_number(123)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 1 |
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    # Round the floating-point cube root before comparing; see the note below.
    val = round(n ** (1 / 3))
    return val * val * val == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 63 |
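The rounding in the fixed `perfect_cube` above matters because the float cube root is inexact; a quick demonstration (standard IEEE-754 double behaviour):

```python
print(64 ** (1 / 3))                     # 3.9999999999999996, so an exact == check fails
print(round(64 ** (1 / 3)))              # 4
print(round(64 ** (1 / 3)) ** 3 == 64)   # True
```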
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
lowerCAmelCase_ : int = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : Optional[int] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase_ : List[str] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 63 | 1 |
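The GPT-NeoX sample above follows the `_LazyModule` pattern: `_import_structure` maps submodules to exported names, and the heavy imports only happen on first attribute access. A minimal sketch of the idea, not the library's actual implementation (the relative import assumes the class is installed as a package module):

```python
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # Invert {submodule: [symbols]} so each symbol knows its home submodule.
        self._symbol_home = {
            symbol: submodule
            for submodule, symbols in import_structure.items()
            for symbol in symbols
        }

    def __getattr__(self, name: str):
        submodule = self._symbol_home.get(name)
        if submodule is None:
            raise AttributeError(name)
        # Import lazily, only when the symbol is first requested.
        module = importlib.import_module(f".{submodule}", self.__name__)
        return getattr(module, name)
```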
'''simple docstring'''
import os
from typing import Dict, List, Tuple, TypeVar, Union
lowerCAmelCase_ : Optional[Any] = TypeVar('T')
lowerCAmelCase_ : int = Union[List[T], Tuple[T, ...]]
lowerCAmelCase_ : int = Union[T, List[T], Dict[str, T]]
lowerCAmelCase_ : Tuple = Union[str, bytes, os.PathLike]
| 63 |
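These aliases express "one `T`, or a list/dict of them"; a small illustrative use (the alias name and the function are assumptions, not part of the snippet):

```python
from typing import Dict, List, TypeVar, Union

T = TypeVar("T")
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]


def as_list(data: NestedDataStructureLike[str]) -> List[str]:
    # Normalise any accepted shape down to a flat list of values.
    if isinstance(data, dict):
        return list(data.values())
    if isinstance(data, list):
        return data
    return [data]
```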
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase_ : Any = get_tests_dir('fixtures')
lowerCAmelCase_ : Union[str, Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCAmelCase_ : Dict = get_tests_dir('fixtures/dummy-config.json')
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] ):
_a = 0
def UpperCamelCase__ ( self : str ):
_a = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Tuple ):
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
_a = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_a = AutoFeatureExtractor.from_pretrained(__a ).to_dict()
config_dict.pop("feature_extractor_type" )
_a = WavaVecaFeatureExtractor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
_a = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Tuple ):
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
__a , "bert-base is not a local folder and is not a valid model identifier" ):
_a = AutoFeatureExtractor.from_pretrained("bert-base" )
def UpperCamelCase__ ( self : Optional[Any] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_a = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" )
def UpperCamelCase__ ( self : List[Any] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
_a = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def UpperCamelCase__ ( self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def UpperCamelCase__ ( self : Any ):
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoFeatureExtractor.register(__a , __a )
# Now that the config is registered, it can be used as any other config with the auto-API
_a = CustomFeatureExtractor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self : Tuple ):
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =True
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# If remote code is not set, the default is to use local
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 63 | 1 |
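The `try/finally` registration test above exercises the auto-class registry; the user-facing pattern looks roughly like this (the stub classes stand in for the test's `CustomConfig`/`CustomFeatureExtractor` fixtures, and the base-class choice is an assumption):

```python
from transformers import AutoConfig, AutoFeatureExtractor
from transformers.configuration_utils import PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin


class CustomConfig(PretrainedConfig):
    model_type = "custom"


class CustomFeatureExtractor(FeatureExtractionMixin):
    pass


# After registering the pair, the auto-API resolves "custom" configs
# to the custom feature extractor, just like built-in model types.
AutoConfig.register("custom", CustomConfig)
AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
```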
'''simple docstring'''
lowerCAmelCase_ : Tuple = '\n# Transformers 설치 방법\n! pip install transformers datasets\n# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
lowerCAmelCase_ : str = [{'type': 'code', 'content': INSTALL_CONTENT}]
lowerCAmelCase_ : Any = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 63 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='gpt_bigcode'
__a =['past_key_values']
__a ={
'hidden_size': 'n_embd',
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self : Optional[Any] , __a : Tuple=5_02_57 , __a : str=10_24 , __a : Dict=7_68 , __a : Tuple=12 , __a : str=12 , __a : Optional[int]=None , __a : Dict="gelu_pytorch_tanh" , __a : Tuple=0.1 , __a : Tuple=0.1 , __a : Union[str, Any]=0.1 , __a : Tuple=1e-5 , __a : str=0.02 , __a : Dict=True , __a : Union[str, Any]=True , __a : Optional[int]=5_02_56 , __a : Optional[int]=5_02_56 , __a : Union[str, Any]=True , __a : Dict=True , __a : Union[str, Any]=True , **__a : List[Any] , ):
_a = vocab_size
_a = n_positions
_a = n_embd
_a = n_layer
_a = n_head
_a = n_inner
_a = activation_function
_a = resid_pdrop
_a = embd_pdrop
_a = attn_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = scale_attn_weights
_a = use_cache
_a = attention_softmax_in_fpaa
_a = scale_attention_softmax_in_fpaa
_a = multi_query
_a = bos_token_id
_a = eos_token_id
super().__init__(bos_token_id=__a , eos_token_id=__a , **__a )
| 63 | 1 |
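The config's `attribute_map` lets generic code read `hidden_size` while the class stores `n_embd`; a simplified sketch of how that indirection works (not the real `PretrainedConfig` code):

```python
class AttributeMappedConfig:
    attribute_map = {"hidden_size": "n_embd"}

    def __init__(self, n_embd: int = 768):
        self.n_embd = n_embd

    def __getattr__(self, name):
        # Called only when normal lookup fails; redirect mapped names.
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)


config = AttributeMappedConfig()
print(config.hidden_size)  # 768, served from n_embd
```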
'''simple docstring'''
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : Optional[int] , __a : Union[str, Any] , __a : Any=13 , __a : Any=[30, 30] , __a : str=2 , __a : Union[str, Any]=3 , __a : Optional[Any]=True , __a : List[Any]=True , __a : Optional[Any]=32 , __a : Optional[Any]=5 , __a : List[Any]=4 , __a : List[str]=37 , __a : Optional[int]="gelu" , __a : Tuple=0.1 , __a : List[Any]=0.1 , __a : Any=10 , __a : Tuple=0.02 , __a : Optional[int]=3 , __a : List[Any]=None , __a : Dict=8 , __a : List[str]=10 , ):
_a = parent
_a = batch_size
_a = image_size
_a = patch_size
_a = num_channels
_a = is_training
_a = use_labels
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = type_sequence_label_size
_a = initializer_range
_a = num_labels
_a = scope
_a = n_targets
_a = num_detection_tokens
# we set the expected sequence length (which is used in several tests)
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
_a = (image_size[1] // patch_size) * (image_size[0] // patch_size)
_a = num_patches + 1 + self.num_detection_tokens
def UpperCamelCase__ ( self : int ):
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
_a = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
_a = []
for i in range(self.batch_size ):
_a = {}
_a = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=__a )
_a = torch.rand(self.n_targets , 4 , device=__a )
labels.append(__a )
_a = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self : List[str] ):
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def UpperCamelCase__ ( self : int , __a : Any , __a : str , __a : Dict ):
_a = YolosModel(config=__a )
model.to(__a )
model.eval()
_a = model(__a )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def UpperCamelCase__ ( self : List[Any] , __a : List[str] , __a : Tuple , __a : List[str] ):
_a = YolosForObjectDetection(__a )
model.to(__a )
model.eval()
_a = model(pixel_values=__a )
_a = model(__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
_a = model(pixel_values=__a , labels=__a )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def UpperCamelCase__ ( self : Optional[int] ):
_a = self.prepare_config_and_inputs()
_a , _a , _a = config_and_inputs
_a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =(YolosModel, YolosForObjectDetection) if is_torch_available() else ()
__a =(
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
__a =False
__a =False
__a =False
__a =False
def UpperCamelCase__ ( self : List[str] , __a : Any , __a : Tuple , __a : str=False ):
_a = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
_a = []
for i in range(self.model_tester.batch_size ):
_a = {}
_a = torch.ones(
size=(self.model_tester.n_targets,) , device=__a , dtype=torch.long )
_a = torch.ones(
self.model_tester.n_targets , 4 , device=__a , dtype=torch.float )
labels.append(__a )
_a = labels
return inputs_dict
def UpperCamelCase__ ( self : Tuple ):
_a = YolosModelTester(self )
_a = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def UpperCamelCase__ ( self : Tuple ):
self.config_tester.run_common_tests()
def UpperCamelCase__ ( self : Optional[Any] ):
# YOLOS does not use inputs_embeds
pass
def UpperCamelCase__ ( self : Optional[int] ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_a = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__a , nn.Linear ) )
def UpperCamelCase__ ( self : Tuple ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__a )
_a = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def UpperCamelCase__ ( self : Optional[int] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def UpperCamelCase__ ( self : int ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
# in YOLOS, the seq_len is different
_a = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
_a = True
_a = False
_a = True
_a = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(__a , __a ) )
_a = outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a = True
_a = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(__a , __a ) )
_a = outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_a = len(__a )
# Check attention is always last and order is fine
_a = True
_a = True
_a = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(__a , __a ) )
_a = 1
self.assertEqual(out_len + added_hidden_states , len(__a ) )
_a = outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def UpperCamelCase__ ( self : Any ):
def check_hidden_states_output(__a : Tuple , __a : Any , __a : List[str] ):
_a = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
_a = model(**self._prepare_for_class(__a , __a ) )
_a = outputs.hidden_states
_a = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
# YOLOS has a different seq_length
_a = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a = True
check_hidden_states_output(__a , __a , __a )
def UpperCamelCase__ ( self : str ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*__a )
@slow
def UpperCamelCase__ ( self : List[Any] ):
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = YolosModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def _lowerCamelCase ( ) -> str:
_a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase__ ( self : Tuple ):
return AutoImageProcessor.from_pretrained("hustvl/yolos-small" ) if is_vision_available() else None
@slow
def UpperCamelCase__ ( self : Tuple ):
_a = YolosForObjectDetection.from_pretrained("hustvl/yolos-small" ).to(__a )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=__a , return_tensors="pt" ).to(__a )
# forward pass
with torch.no_grad():
_a = model(inputs.pixel_values )
# verify outputs
_a = torch.Size((1, 1_00, 92) )
self.assertEqual(outputs.logits.shape , __a )
_a = torch.tensor(
[[-24.0248, -10.3024, -14.8290], [-42.0392, -16.8200, -27.4334], [-27.2743, -11.8154, -18.7148]] , device=__a , )
_a = torch.tensor(
[[0.2559, 0.5455, 0.4706], [0.2989, 0.7279, 0.1875], [0.7732, 0.4017, 0.4462]] , device=__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , __a , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , __a , atol=1e-4 ) )
# verify postprocessing
_a = image_processor.post_process_object_detection(
__a , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
_a = torch.tensor([0.9994, 0.9790, 0.9964, 0.9972, 0.9861] ).to(__a )
_a = [75, 75, 17, 63, 17]
_a = torch.tensor([335.0609, 79.3848, 375.4216, 187.2495] ).to(__a )
self.assertEqual(len(results["scores"] ) , 5 )
self.assertTrue(torch.allclose(results["scores"] , __a , atol=1e-4 ) )
self.assertSequenceEqual(results["labels"].tolist() , __a )
self.assertTrue(torch.allclose(results["boxes"][0, :] , __a ) )
| 63 |
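The tester's `expected_seq_len` combines image patches, the prepended [CLS] token, and the detection tokens; with the tester defaults above it works out as:

```python
image_size = [30, 30]        # tester default
patch_size = 2               # tester default
num_detection_tokens = 10    # tester default

num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)  # 15 * 15 = 225
expected_seq_len = num_patches + 1 + num_detection_tokens                    # 225 + 1 + 10 = 236
print(expected_seq_len)
```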
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    # Round the floating-point cube root to avoid IEEE-754 precision errors.
    val = round(n ** (1 / 3))
    return val * val * val == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 63 | 1 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _lowerCamelCase ( lowercase : List[str] ) -> Optional[Any]:
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def _lowerCamelCase ( lowercase : str ) -> Tuple:
# word like '180' or '身高' or '神'
for char in word:
_a = ord(lowercase )
if not _is_chinese_char(lowercase ):
return 0
return 1
def _lowerCamelCase ( lowercase : List[str] ) -> List[Any]:
_a = set()
for token in tokens:
_a = len(lowercase ) > 1 and is_chinese(lowercase )
if chinese_word:
word_set.add(lowercase )
_a = list(lowercase )
return word_list
def _lowerCamelCase ( lowercase : List[str] , lowercase : set() ) -> Dict:
if not chinese_word_set:
return bert_tokens
_a = max([len(lowercase ) for w in chinese_word_set] )
_a = bert_tokens
_a , _a = 0, len(lowercase )
while start < end:
_a = True
if is_chinese(bert_word[start] ):
_a = min(end - start , lowercase )
for i in range(lowercase , 1 , -1 ):
_a = "".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_a = "##" + bert_word[j]
_a = start + i
_a = False
break
if single_word:
start += 1
return bert_word
def _lowerCamelCase ( lowercase : List[str] , lowercase : LTP , lowercase : BertTokenizer ) -> int:
_a = []
for i in range(0 , len(lowercase ) , 100 ):
_a = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["cws"] ).cws
_a = [get_chinese_word(lowercase ) for r in res]
ltp_res.extend(lowercase )
assert len(lowercase ) == len(lowercase )
_a = []
for i in range(0 , len(lowercase ) , 100 ):
_a = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowercase , truncation=lowercase , max_length=512 )
bert_res.extend(res["input_ids"] )
assert len(lowercase ) == len(lowercase )
_a = []
for input_ids, chinese_word in zip(lowercase , lowercase ):
_a = []
for id in input_ids:
_a = bert_tokenizer._convert_id_to_token(lowercase )
input_tokens.append(lowercase )
_a = add_sub_symbol(lowercase , lowercase )
_a = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(lowercase ):
if token[:2] == "##":
_a = token[2:]
# save chinese tokens' pos
if len(lowercase ) == 1 and _is_chinese_char(ord(lowercase ) ):
ref_id.append(lowercase )
ref_ids.append(lowercase )
assert len(lowercase ) == len(lowercase )
return ref_ids
def _lowerCamelCase ( lowercase : str ) -> Tuple:
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , "r" , encoding="utf-8" ) as f:
_a = f.readlines()
_a = [line.strip() for line in data if len(lowercase ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_a = LTP(args.ltp ) # faster in GPU device
_a = BertTokenizer.from_pretrained(args.bert )
_a = prepare_ref(lowercase , lowercase , lowercase )
with open(args.save_path , "w" , encoding="utf-8" ) as f:
_a = [json.dumps(lowercase ) + "\n" for ref in ref_ids]
f.writelines(lowercase )
if __name__ == "__main__":
lowerCAmelCase_ : Tuple = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
lowerCAmelCase_ : Tuple = parser.parse_args()
main(args)
| 63 |
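The `add_sub_symbol` helper above re-marks BERT sub-tokens that fall inside an LTP-segmented Chinese word with the `##` prefix, so whole-word masking treats the word as one unit. A simplified, self-contained re-implementation (it drops the snippet's `is_chinese` guard; the example tokens are made up):

```python
from typing import List, Set


def mark_whole_words(bert_tokens: List[str], words: Set[str]) -> List[str]:
    max_len = max((len(w) for w in words), default=0)
    out = list(bert_tokens)
    start, end = 0, len(out)
    while start < end:
        matched = False
        # Try the longest candidate word first, shrinking down to length 2.
        for i in range(min(end - start, max_len), 1, -1):
            if "".join(out[start : start + i]) in words:
                for j in range(start + 1, start + i):
                    out[j] = "##" + out[j]
                start += i
                matched = True
                break
        if not matched:
            start += 1
    return out


print(mark_whole_words(["我", "喜", "欢", "北", "京"], {"喜欢", "北京"}))
# ['我', '喜', '##欢', '北', '##京']
```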
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[int] = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='deta'
__a ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : List[str] , __a : List[str]=None , __a : Dict=9_00 , __a : str=20_48 , __a : Tuple=6 , __a : List[str]=20_48 , __a : str=8 , __a : Union[str, Any]=6 , __a : int=10_24 , __a : List[Any]=8 , __a : Dict=0.0 , __a : Tuple=True , __a : Optional[Any]="relu" , __a : Tuple=2_56 , __a : Optional[Any]=0.1 , __a : int=0.0 , __a : List[Any]=0.0 , __a : Optional[int]=0.02 , __a : str=1.0 , __a : Dict=True , __a : Dict=False , __a : Optional[int]="sine" , __a : Any=5 , __a : List[str]=4 , __a : Optional[int]=4 , __a : List[str]=True , __a : str=3_00 , __a : int=True , __a : int=True , __a : Tuple=1 , __a : Optional[int]=5 , __a : Tuple=2 , __a : Dict=1 , __a : Optional[int]=1 , __a : Any=5 , __a : Optional[int]=2 , __a : Dict=0.1 , __a : str=0.25 , **__a : Tuple , ):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_a = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(__a , __a ):
_a = backbone_config.pop("model_type" )
_a = CONFIG_MAPPING[backbone_model_type]
_a = config_class.from_dict(__a )
_a = backbone_config
_a = num_queries
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = init_xavier_std
_a = encoder_layerdrop
_a = auxiliary_loss
_a = position_embedding_type
# deformable attributes
_a = num_feature_levels
_a = encoder_n_points
_a = decoder_n_points
_a = two_stage
_a = two_stage_num_proposals
_a = with_box_refine
_a = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
_a = class_cost
_a = bbox_cost
_a = giou_cost
# Loss coefficients
_a = mask_loss_coefficient
_a = dice_loss_coefficient
_a = bbox_loss_coefficient
_a = giou_loss_coefficient
_a = eos_coefficient
_a = focal_alpha
super().__init__(is_encoder_decoder=__a , **__a )
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self : Dict ):
return self.d_model
def UpperCamelCase__ ( self : List[str] ):
_a = copy.deepcopy(self.__dict__ )
_a = self.backbone_config.to_dict()
_a = self.__class__.model_type
return output
| 63 | 1 |
'''simple docstring'''
import argparse
import numpy as np
import torch
from transformers import SpeechTaHifiGan, SpeechTaHifiGanConfig, logging
logging.set_verbosity_info()
lowerCAmelCase_ : Dict = logging.get_logger('transformers.models.speecht5')
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : List[str] , lowercase : Optional[Any] ) -> Tuple:
hf_model.apply_weight_norm()
_a = checkpoint["input_conv.weight_g"]
_a = checkpoint["input_conv.weight_v"]
_a = checkpoint["input_conv.bias"]
for i in range(len(config.upsample_rates ) ):
_a = checkpoint[F'upsamples.{i}.1.weight_g']
_a = checkpoint[F'upsamples.{i}.1.weight_v']
_a = checkpoint[F'upsamples.{i}.1.bias']
for i in range(len(config.upsample_rates ) * len(config.resblock_kernel_sizes ) ):
for j in range(len(config.resblock_dilation_sizes ) ):
_a = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_g']
_a = checkpoint[F'blocks.{i}.convs1.{j}.1.weight_v']
_a = checkpoint[F'blocks.{i}.convs1.{j}.1.bias']
_a = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_g']
_a = checkpoint[F'blocks.{i}.convs2.{j}.1.weight_v']
_a = checkpoint[F'blocks.{i}.convs2.{j}.1.bias']
_a = checkpoint["output_conv.1.weight_g"]
_a = checkpoint["output_conv.1.weight_v"]
_a = checkpoint["output_conv.1.bias"]
hf_model.remove_weight_norm()
@torch.no_grad()
def _lowerCamelCase ( lowercase : Dict , lowercase : List[str] , lowercase : Any , lowercase : int=None , lowercase : List[str]=None , ) -> Any:
if config_path is not None:
_a = SpeechTaHifiGanConfig.from_pretrained(lowercase )
else:
_a = SpeechTaHifiGanConfig()
_a = SpeechTaHifiGan(lowercase )
_a = torch.load(lowercase )
load_weights(orig_checkpoint["model"]["generator"] , lowercase , lowercase )
_a = np.load(lowercase )
_a = stats[0].reshape(-1 )
_a = stats[1].reshape(-1 )
_a = torch.from_numpy(lowercase ).float()
_a = torch.from_numpy(lowercase ).float()
model.save_pretrained(lowercase )
if repo_id:
print("Pushing to the hub..." )
model.push_to_hub(lowercase )
if __name__ == "__main__":
lowerCAmelCase_ : Tuple = argparse.ArgumentParser()
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--stats_path', required=True, default=None, type=str, help='Path to stats.npy file')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
lowerCAmelCase_ : Tuple = parser.parse_args()
convert_hifigan_checkpoint(
args.checkpoint_path,
args.stats_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 63 |
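The converter above applies weight normalisation so the checkpoint's `weight_g`/`weight_v` pairs have somewhere to land, then removes it to bake them into plain weights. The same dance in miniature with plain PyTorch (the layer shape is arbitrary):

```python
import torch
from torch import nn
from torch.nn.utils import remove_weight_norm, weight_norm

conv = weight_norm(nn.Conv1d(80, 512, kernel_size=7))
# While normalised, the module exposes weight_g / weight_v parameters,
# matching the keys stored in the original checkpoint.
conv.weight_g.data = torch.ones_like(conv.weight_g)
conv.weight_v.data = torch.randn_like(conv.weight_v)
remove_weight_norm(conv)  # folds g * v / ||v|| into a single .weight
```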
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : int , lowercase : int=1024 , lowercase : int=1024 , lowercase : Tuple=False , **lowercase : Optional[int] ) -> Union[str, Any]:
_a = AutoTokenizer.from_pretrained(lowercase )
_a = SeqaSeqDataset(lowercase , lowercase , lowercase , lowercase , type_path="train" , **lowercase )
_a = tok.pad_token_id
def get_lens(lowercase : Optional[int] ):
_a = tqdm(
DataLoader(lowercase , batch_size=512 , num_workers=8 , shuffle=lowercase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_a = []
for batch in dl:
_a = batch["input_ids"].ne(lowercase ).sum(1 ).tolist()
_a = batch["labels"].ne(lowercase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowercase , lowercase ):
max_lens.append(max(lowercase , lowercase ) )
else:
max_lens.extend(lowercase )
return max_lens
_a = get_lens(lowercase )
_a = SeqaSeqDataset(lowercase , lowercase , lowercase , lowercase , type_path="val" , **lowercase )
_a = get_lens(lowercase )
pickle_save(lowercase , train_ds.len_file )
pickle_save(lowercase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 63 | 1 |
'''simple docstring'''
from math import sqrt
def sum_of_divisors(n: int) -> int:
    """Return the sum of the proper divisors of ``n``."""
    total = 0
    for i in range(1, int(sqrt(n) + 1)):
        if n % i == 0 and i != sqrt(n):
            total += i + n // i
        elif i == sqrt(n):
            total += i
    return total - n


def solution(limit: int = 10000) -> int:
    """Sum every amicable number below ``limit``."""
    total = sum(
        i
        for i in range(1, limit)
        if sum_of_divisors(sum_of_divisors(i)) == i and sum_of_divisors(i) != i
    )
    return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 63 |
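A quick sanity check of the fixed `sum_of_divisors` above against the classic amicable pair (220, 284), assuming the function is in scope:

```python
assert sum_of_divisors(220) == 284
assert sum_of_divisors(284) == 220
print(solution(300))  # 504, i.e. 220 + 284
```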
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : str ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : str ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Any ):
_a = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Any ):
# pass variant but use the non-variant filenames
_a = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Dict ):
_a = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : List[str] ):
# pass variant but use the non-variant filenames
_a = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Optional[int] ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
| 63 | 1 |
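The tests above pin down `is_safetensors_compatible`'s contract: every `.bin` weight needs a `.safetensors` sibling, `pytorch_model.bin` pairs with `model.safetensors`, and a plain safetensors file also satisfies a variant-flavoured `.bin`. A simplified re-implementation of that contract, not the library's actual code:

```python
from typing import List, Optional


def is_safetensors_compatible_sketch(filenames: List[str], variant: Optional[str] = None) -> bool:
    safetensors = {f for f in filenames if f.endswith(".safetensors")}
    for f in filenames:
        if not f.endswith(".bin"):
            continue
        stem = f[: -len(".bin")]
        # pytorch_model.bin pairs with model.safetensors; diffusion weights keep their stem.
        if "diffusion" not in stem:
            stem = stem.replace("pytorch_model", "model")
        # Either the variant-flavoured or the plain safetensors file satisfies the .bin.
        candidates = [stem + ".safetensors"]
        if variant is not None and f".{variant}" in stem:
            candidates.append(stem.replace(f".{variant}", "") + ".safetensors")
        if not any(c in safetensors for c in candidates):
            return False
    return True
```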
'''simple docstring'''
import torch
from diffusers import EulerDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =(EulerDiscreteScheduler,)
__a =10
def UpperCamelCase__ ( self : List[Any] , **__a : Union[str, Any] ):
_a = {
"num_train_timesteps": 11_00,
"beta_start": 0.0001,
"beta_end": 0.02,
"beta_schedule": "linear",
}
config.update(**__a )
return config
def UpperCamelCase__ ( self : int ):
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__a )
def UpperCamelCase__ ( self : str ):
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02] ):
self.check_over_configs(beta_start=__a , beta_end=__a )
def UpperCamelCase__ ( self : int ):
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__a )
def UpperCamelCase__ ( self : List[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__a )
def UpperCamelCase__ ( self : Any ):
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
_a = torch.manual_seed(0 )
_a = self.dummy_model()
_a = self.dummy_sample_deter * scheduler.init_noise_sigma
_a = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
_a = scheduler.scale_model_input(__a , __a )
_a = model(__a , __a )
_a = scheduler.step(__a , __a , __a , generator=__a )
_a = output.prev_sample
_a = torch.sum(torch.abs(__a ) )
_a = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def UpperCamelCase__ ( self : Any ):
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config(prediction_type="v_prediction" )
_a = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps )
_a = torch.manual_seed(0 )
_a = self.dummy_model()
_a = self.dummy_sample_deter * scheduler.init_noise_sigma
_a = sample.to(__a )
for i, t in enumerate(scheduler.timesteps ):
_a = scheduler.scale_model_input(__a , __a )
_a = model(__a , __a )
_a = scheduler.step(__a , __a , __a , generator=__a )
_a = output.prev_sample
_a = torch.sum(torch.abs(__a ) )
_a = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 0.0002 ) < 1e-2
assert abs(result_mean.item() - 2.2_6_7_6e-0_6 ) < 1e-3
def UpperCamelCase__ ( self : List[str] ):
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
_a = torch.manual_seed(0 )
_a = self.dummy_model()
_a = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_a = sample.to(__a )
for t in scheduler.timesteps:
_a = scheduler.scale_model_input(__a , __a )
_a = model(__a , __a )
_a = scheduler.step(__a , __a , __a , generator=__a )
_a = output.prev_sample
_a = torch.sum(torch.abs(__a ) )
_a = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 10.0807 ) < 1e-2
assert abs(result_mean.item() - 0.0131 ) < 1e-3
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self.scheduler_classes[0]
_a = self.get_scheduler_config()
_a = scheduler_class(**__a , use_karras_sigmas=__a )
scheduler.set_timesteps(self.num_inference_steps , device=__a )
_a = torch.manual_seed(0 )
_a = self.dummy_model()
_a = self.dummy_sample_deter * scheduler.init_noise_sigma.cpu()
_a = sample.to(__a )
for t in scheduler.timesteps:
_a = scheduler.scale_model_input(__a , __a )
_a = model(__a , __a )
_a = scheduler.step(__a , __a , __a , generator=__a )
_a = output.prev_sample
_a = torch.sum(torch.abs(__a ) )
_a = torch.mean(torch.abs(__a ) )
assert abs(result_sum.item() - 124.52299499511719 ) < 1e-2
assert abs(result_mean.item() - 0.16213932633399963 ) < 1e-3
| 63 |
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    """Encode ``data`` as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in data])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 1 |
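A round-trip check of the two renamed helpers above, assuming they are in scope:

```python
encoded = base16_encode(b"Hello")      # '48656C6C6F'
assert base16_decode(encoded) == b"Hello"
```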
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    """Encode ``data`` as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in data])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : Dict ) -> str:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : int , lowercase : Tuple , lowercase : Optional[int] , lowercase : int=True ) -> Any:
model.train()
_a = model(lowercase )
_a = F.mse_loss(lowercase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowercase )
def _lowerCamelCase ( lowercase : int , lowercase : Tuple=False ) -> List[str]:
set_seed(42 )
_a = RegressionModel()
_a = deepcopy(lowercase )
_a = RegressionDataset(length=80 )
_a = DataLoader(lowercase , batch_size=16 )
model.to(accelerator.device )
if sched:
_a = AdamW(params=model.parameters() , lr=1E-3 )
_a = AdamW(params=ddp_model.parameters() , lr=1E-3 )
_a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 )
_a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 )
# Make a copy of `model`
if sched:
_a , _a , _a , _a = accelerator.prepare(lowercase , lowercase , lowercase , lowercase )
else:
_a , _a = accelerator.prepare(lowercase , lowercase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[int]:
# Test when on a single CPU or GPU that the context manager does nothing
_a , _a , _a = get_training_setup(lowercase )
# Use a single batch
_a , _a = next(iter(lowercase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
else:
# Sync grads
step_model(lowercase , lowercase , lowercase , lowercase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowercase , lowercase , lowercase , lowercase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_a = ddp_input[torch.randperm(len(lowercase ) )]
def _lowerCamelCase ( lowercase : Tuple ) -> Tuple:
# Test on distributed setup that context manager behaves properly
_a , _a , _a = get_training_setup(lowercase )
# Use a single batch
_a , _a = next(iter(lowercase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
else:
# Sync grads
step_model(lowercase , lowercase , lowercase , lowercase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_a = ddp_input[torch.randperm(len(lowercase ) )]
def _lowerCamelCase ( lowercase : List[Any]=False , lowercase : Optional[int]=False ) -> Any:
_a = Accelerator(
split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_a , _a , _a = get_training_setup(lowercase )
for iteration, batch in enumerate(lowercase ):
_a , _a = batch.values()
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_a = ddp_input[torch.randperm(len(lowercase ) )]
GradientState._reset_state()
def _lowerCamelCase ( lowercase : int=False , lowercase : int=False ) -> Dict:
_a = Accelerator(
split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_a , _a , _a , _a , _a , _a , _a = get_training_setup(lowercase , lowercase )
for iteration, batch in enumerate(lowercase ):
_a , _a = batch.values()
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowercase , lowercase , lowercase , lowercase , lowercase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
_a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase ))
if accelerator.num_processes > 1:
check_model_parameters(lowercase , lowercase , lowercase , lowercase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def _lowerCamelCase ( ) -> Any:
_a = Accelerator()
_a = RegressionDataset(length=80 )
_a = DataLoader(lowercase , batch_size=16 )
_a = RegressionDataset(length=96 )
_a = DataLoader(lowercase , batch_size=16 )
_a , _a = accelerator.prepare(lowercase , lowercase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowercase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase )
if iteration < len(lowercase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowercase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase )
if batch_num < len(lowercase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _lowerCamelCase ( ) -> Optional[Any]:
_a = Accelerator()
_a = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(lowercase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(lowercase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(lowercase , lowercase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(lowercase , lowercase )
def _lowerCamelCase ( lowercase : Any ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 63 | 1 |
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def __init__( self : List[str] , __a : str , __a : int = 13 , __a : int = 64 , __a : int = 2 , __a : int = 3 , __a : int = 3 , __a : bool = True , __a : bool = True , __a : int = 1_28 , __a : List[str]=[16, 32, 64, 1_28] , __a : int = 7 , __a : int = 4 , __a : int = 37 , __a : str = "gelu" , __a : float = 0.1 , __a : float = 0.1 , __a : int = 10 , __a : float = 0.02 , __a : int = 2 , __a : int = 1 , __a : int = 1_28 , __a : List[int] = [2, 2, 2, 2] , __a : int = 2 , __a : int = 2 , ):
_a = parent
_a = batch_size
_a = image_size
_a = patch_size
_a = num_channels
_a = is_training
_a = use_labels
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = type_sequence_label_size
_a = initializer_range
_a = encoder_stride
_a = num_attention_outputs
_a = embed_dim
_a = embed_dim + 1
_a = resolution
_a = depths
_a = hidden_sizes
_a = dim
_a = mlp_expansion_ratio
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a = None
if self.use_labels:
_a = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_a = self.get_config()
return config, pixel_values, labels
def UpperCamelCase__ ( self : str ):
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__a , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def UpperCamelCase__ ( self : Any , __a : str , __a : Tuple , __a : Optional[int] ):
_a = TFEfficientFormerModel(config=__a )
_a = model(__a , training=__a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCamelCase__ ( self : Union[str, Any] , __a : Union[str, Any] , __a : Union[str, Any] , __a : Tuple ):
_a = self.type_sequence_label_size
_a = TFEfficientFormerForImageClassification(__a )
_a = model(__a , labels=__a , training=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_a = 1
_a = TFEfficientFormerForImageClassification(__a )
_a = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_a = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCamelCase__ ( self : Tuple ):
_a = self.prepare_config_and_inputs()
_a , _a , _a = config_and_inputs
_a = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ , unittest.TestCase ):
"""simple docstring"""
__a =(
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__a =(
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__a =False
__a =False
__a =False
__a =False
__a =False
def UpperCamelCase__ ( self : Any ):
_a = TFEfficientFormerModelTester(self )
_a = ConfigTester(
self , config_class=__a , has_text_modality=__a , hidden_size=37 )
def UpperCamelCase__ ( self : Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason="EfficientFormer does not use inputs_embeds" )
def UpperCamelCase__ ( self : List[str] ):
pass
@unittest.skip(reason="EfficientFormer does not support input and output embeddings" )
def UpperCamelCase__ ( self : str ):
pass
def UpperCamelCase__ ( self : Optional[Any] ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = model_class(__a )
_a = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a = [*signature.parameters.keys()]
_a = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __a )
def UpperCamelCase__ ( self : Optional[Any] ):
def check_hidden_states_output(__a : int , __a : Union[str, Any] , __a : Optional[Any] ):
_a = model_class(__a )
_a = model(**self._prepare_for_class(__a , __a ) , training=__a )
_a = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a = getattr(
self.model_tester , "expected_num_hidden_layers" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(__a ) , __a )
if hasattr(self.model_tester , "encoder_seq_length" ):
_a = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , "chunk_length" ) and self.model_tester.chunk_length > 1:
_a = seq_length * self.model_tester.chunk_length
else:
_a = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_a = outputs.decoder_hidden_states
self.assertIsInstance(__a , (list, tuple) )
self.assertEqual(len(__a ) , __a )
_a = getattr(self.model_tester , "seq_length" , __a )
_a = getattr(self.model_tester , "decoder_seq_length" , __a )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a = True
check_hidden_states_output(__a , __a , __a )
def UpperCamelCase__ ( self : Any , __a : List[Any] , __a : Dict , __a : List[str]=False ):
_a = super()._prepare_for_class(__a , __a , return_labels=__a )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCamelCase__ ( self : Dict ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
@unittest.skip(reason="EfficientFormer does not implement masked image modeling yet" )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__a )
def UpperCamelCase__ ( self : Union[str, Any] ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
@slow
def UpperCamelCase__ ( self : str ):
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a = TFEfficientFormerModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def UpperCamelCase__ ( self : List[str] ):
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
_a = True
_a = getattr(self.model_tester , "seq_length" , __a )
_a = getattr(self.model_tester , "encoder_seq_length" , __a )
_a = getattr(self.model_tester , "key_length" , __a )
_a = getattr(self.model_tester , "chunk_length" , __a )
if chunk_length is not None and hasattr(self.model_tester , "num_hashes" ):
_a = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_a = True
_a = False
_a = True
_a = model_class(__a )
_a = model(**self._prepare_for_class(__a , __a ) , training=__a )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_a = True
_a = model_class(__a )
_a = model(**self._prepare_for_class(__a , __a ) , training=__a )
_a = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__a ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def UpperCamelCase__ ( self : Tuple ):
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
_a , _a = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
_a = model_class(__a )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
_a = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=__a )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
_a = model(__a )
self.assertTrue(outputs_dict is not None )
def _lowerCamelCase ( ) -> str:
_a = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCamelCase__ ( self : Optional[Any] ):
return (
EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300" )
if is_vision_available()
else None
)
@slow
def UpperCamelCase__ ( self : Tuple ):
_a = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300" )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=__a , return_tensors="tf" )
# forward pass
_a = model(**__a , training=__a )
# verify the logits
_a = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , __a )
_a = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
@slow
def UpperCamelCase__ ( self : Optional[int] ):
_a = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"snap-research/efficientformer-l1-300" )
_a = self.default_image_processor
_a = prepare_img()
_a = image_processor(images=__a , return_tensors="tf" )
# forward pass
_a = model(**__a , training=__a )
# verify the logits
_a = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , __a )
_a = tf.constant([-0.1312, 0.4353, -1.0499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , __a , atol=1e-4 ) )
| 63 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='trocr'
__a =['past_key_values']
__a ={
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
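# attribute_map lets generic code read e.g. config.hidden_size and transparently get
# config.d_model, keeping TrOCR's decoder-style names usable through the common
# PretrainedConfig accessors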
def __init__( self : Optional[int] , __a : Any=5_02_65 , __a : Optional[int]=10_24 , __a : List[Any]=12 , __a : str=16 , __a : int=40_96 , __a : Optional[Any]="gelu" , __a : Union[str, Any]=5_12 , __a : Dict=0.1 , __a : List[str]=0.0 , __a : Union[str, Any]=0.0 , __a : Any=2 , __a : Union[str, Any]=0.02 , __a : Any=0.0 , __a : List[str]=True , __a : Optional[Any]=False , __a : Union[str, Any]=True , __a : Optional[Any]=True , __a : Any=1 , __a : List[Any]=0 , __a : Any=2 , **__a : Optional[Any] , ):
_a = vocab_size
_a = d_model
_a = decoder_layers
_a = decoder_attention_heads
_a = decoder_ffn_dim
_a = activation_function
_a = max_position_embeddings
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = init_std
_a = decoder_layerdrop
_a = use_cache
_a = scale_embedding
_a = use_learned_position_embeddings
_a = layernorm_embedding
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
| 63 | 1 |
'''simple docstring'''
def prefix_function ( input_string : str ) -> list:
    """
    Knuth-Morris-Pratt prefix function: result[i] is the length of the longest
    proper prefix of input_string[: i + 1] that is also a suffix of it.

    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    >>> prefix_function("asdasdad")
    [0, 0, 0, 1, 2, 3, 4, 0]
    """
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def longest_prefix ( input_string : str ) -> int:
    """
    >>> longest_prefix("aabcdaabc")
    4
    >>> longest_prefix("asdasdad")
    4
    """
    return max(prefix_function(input_string ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase_ : Any = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCAmelCase_ : List[str] = re.compile(R'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
lowerCAmelCase_ : Tuple = re.compile(R'\s*\(\s*"(\S[^"]+)"')
def _lowerCamelCase ( lowercase : Any , lowercase : bool = False ) -> Optional[Any]:
with open(lowercase , "r" , encoding="utf-8" ) as f:
_a = f.read()
_a = content.split("\n" )
_a = []
_a = 0
while line_idx < len(lowercase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
_a = len(re.search(r"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
_a = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
_a = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
_a = sorted(lowercase , key=lambda lowercase : _re_identifier.search(lowercase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write("\n".join(lowercase ) )
elif "\n".join(lowercase ) != content:
return True
def _lowerCamelCase ( lowercase : bool = False ) -> List[str]:
_a = [os.path.join(lowercase , lowercase ) for f in os.listdir(lowercase ) if f.endswith(".py" )]
_a = [sort_auto_mapping(lowercase , overwrite=lowercase ) for fname in fnames]
if not overwrite and any(lowercase ):
_a = [f for f, d in zip(lowercase , lowercase ) if d]
raise ValueError(
F'The following files have auto mappings that need sorting: {", ".join(lowercase )}. Run `make style` to fix'
" this." )
if __name__ == "__main__":
lowerCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
lowerCAmelCase_ : Optional[int] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 63 | 1 |
'''simple docstring'''
import os
try:
from .build_directory_md import good_file_paths
except ImportError:
from build_directory_md import good_file_paths # type: ignore
lowerCAmelCase_ : int = list(good_file_paths())
assert filepaths, "good_file_paths() failed!"
lowerCAmelCase_ : List[str] = [file for file in filepaths if file != file.lower()]
if upper_files:
print(f"""{len(upper_files)} files contain uppercase characters:""")
print('\n'.join(upper_files) + '\n')
lowerCAmelCase_ : Dict = [file for file in filepaths if ' ' in file]
if space_files:
print(f"""{len(space_files)} files contain space characters:""")
print('\n'.join(space_files) + '\n')
lowerCAmelCase_ : Any = [file for file in filepaths if '-' in file]
if hyphen_files:
print(f"""{len(hyphen_files)} files contain hyphen characters:""")
print('\n'.join(hyphen_files) + '\n')
lowerCAmelCase_ : Optional[Any] = [file for file in filepaths if os.sep not in file]
if nodir_files:
print(f"""{len(nodir_files)} files are not in a directory:""")
print('\n'.join(nodir_files) + '\n')
lowerCAmelCase_ : int = len(upper_files + space_files + hyphen_files + nodir_files)
if bad_files:
import sys
sys.exit(bad_files)
| 63 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : int = logging.get_logger(__name__)
lowerCAmelCase_ : Tuple = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='big_bird'
def __init__( self : Optional[int] , __a : Dict=5_03_58 , __a : str=7_68 , __a : List[Any]=12 , __a : List[str]=12 , __a : Union[str, Any]=30_72 , __a : str="gelu_new" , __a : Dict=0.1 , __a : Union[str, Any]=0.1 , __a : Any=40_96 , __a : int=2 , __a : Tuple=0.02 , __a : List[Any]=1e-1_2 , __a : int=True , __a : List[str]=0 , __a : Tuple=1 , __a : Optional[Any]=2 , __a : Tuple=66 , __a : str="block_sparse" , __a : Tuple=True , __a : Optional[int]=False , __a : str=64 , __a : Tuple=3 , __a : Any=None , **__a : Dict , ):
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , sep_token_id=__a , **__a , )
_a = vocab_size
_a = max_position_embeddings
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = initializer_range
_a = type_vocab_size
_a = layer_norm_eps
_a = use_cache
_a = rescale_embeddings
_a = attention_type
_a = use_bias
_a = block_size
_a = num_random_blocks
_a = classifier_dropout
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
@property
def UpperCamelCase__ ( self : Optional[int] ):
if self.task == "multiple-choice":
_a = {0: "batch", 1: "choice", 2: "sequence"}
else:
_a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
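# the dynamic axes above let an exported ONNX graph accept arbitrary batch and
# sequence (and, for multiple choice, num-choices) sizes at inference time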
| 63 | 1 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['image_processor', 'tokenizer']
__a ='OwlViTImageProcessor'
__a =('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[Any] , __a : str=None , __a : List[str]=None , **__a : List[Any] ):
_a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : Union[str, Any] , __a : Any=None , __a : List[str]=None , __a : int=None , __a : Optional[int]="max_length" , __a : List[str]="np" , **__a : Any ):
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(__a , __a ) or (isinstance(__a , __a ) and not isinstance(text[0] , __a )):
_a = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )]
elif isinstance(__a , __a ) and isinstance(text[0] , __a ):
_a = []
# Maximum number of queries across batch
_a = max([len(__a ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__a ) != max_num_queries:
_a = t + [" "] * (max_num_queries - len(__a ))
_a = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )
encodings.append(__a )
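# e.g. (illustrative values): text=[["cat"], ["dog", "remote"]] gives max_num_queries=2,
# so the first sample is padded to ["cat", " "] before tokenization, keeping the
# number of query slots identical across the batch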
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
_a = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_a = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_a = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_a = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_a = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
_a = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_a = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
_a = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
_a = BatchEncoding()
_a = input_ids
_a = attention_mask
if query_images is not None:
_a = BatchEncoding()
_a = self.image_processor(
__a , return_tensors=__a , **__a ).pixel_values
_a = query_pixel_values
if images is not None:
_a = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
_a = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_a = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def UpperCamelCase__ ( self : List[str] , *__a : Union[str, Any] , **__a : int ):
return self.image_processor.post_process(*__a , **__a )
def UpperCamelCase__ ( self : Optional[int] , *__a : Optional[Any] , **__a : List[str] ):
return self.image_processor.post_process_object_detection(*__a , **__a )
def UpperCamelCase__ ( self : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ):
return self.image_processor.post_process_image_guided_detection(*__a , **__a )
def UpperCamelCase__ ( self : str , *__a : Tuple , **__a : Tuple ):
return self.tokenizer.batch_decode(*__a , **__a )
def UpperCamelCase__ ( self : List[str] , *__a : List[Any] , **__a : Optional[int] ):
return self.tokenizer.decode(*__a , **__a )
@property
def UpperCamelCase__ ( self : List[str] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self : str ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
| 63 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Dict , *,
__a : int = 4 , __a : int = 7_68 , __a : int , __a : int , ):
super().__init__()
_a = nn.Parameter(torch.zeros(__a ) )
# parameters for additional clip time embeddings
_a = nn.Linear(__a , __a )
_a = nn.Linear(__a , __a )
# parameters for encoder hidden states
_a = clip_extra_context_tokens
_a = nn.Linear(
__a , self.clip_extra_context_tokens * cross_attention_dim )
_a = nn.Linear(__a , __a )
_a = nn.LayerNorm(__a )
def UpperCamelCase__ ( self : Optional[Any] , *, __a : Tuple , __a : Union[str, Any] , __a : Any , __a : List[Any] ):
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
_a = image_embeddings.shape[0]
_a = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
_a = classifier_free_guidance_embeddings.expand(
__a , -1 )
_a = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
_a = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
_a = self.embedding_proj(__a )
_a = self.clip_image_embeddings_project_to_time_embeddings(__a )
_a = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
_a = self.clip_extra_context_tokens_proj(__a )
_a = clip_extra_context_tokens.reshape(__a , -1 , self.clip_extra_context_tokens )
_a = clip_extra_context_tokens.permute(0 , 2 , 1 )
_a = self.encoder_hidden_states_proj(__a )
_a = self.text_encoder_hidden_states_norm(__a )
_a = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
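# Shape sketch (illustrative, not from the source): with batch size B, CLIP embedding dim E,
# cross-attention dim D and clip_extra_context_tokens=4, image_embeddings (B, E) is projected
# to (B, 4*D), reshaped to (B, D, 4) and permuted to (B, 4, D) so the four extra tokens can be
# concatenated in front of the projected text encoder hidden states along the sequence axis.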
| 63 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCAmelCase_ : Dict = logging.getLogger(__name__)
def _lowerCamelCase ( ) -> Optional[int]:
_a = argparse.ArgumentParser(
description="Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset." )
parser.add_argument(
"--dataset_name" , type=lowercase , default="wikitext" , help="Name of the training. Explore datasets at: hf.co/datasets." , )
parser.add_argument(
"--dataset_config" , type=lowercase , default="wikitext-103-raw-v1" , help="Configuration name of the dataset." )
parser.add_argument(
"--tokenizer_name_or_path" , type=lowercase , default="sayakpaul/unigram-tokenizer-wikitext" , help="Tokenizer identifier. Can be a local filepath or a Hub identifier." , )
parser.add_argument(
"--shard_size" , type=lowercase , default=1000 , help="Number of entries to go in a single shard." , )
parser.add_argument("--split" , type=lowercase , default="train" , choices=["train", "test", "validation"] )
parser.add_argument(
"--limit" , default=lowercase , type=lowercase , help="Limit the number of shards (used for debugging)." , )
parser.add_argument(
"--max_length" , type=lowercase , default=512 , help="Maximum sequence length. For training on TPUs, it helps to have a maximum"
" sequence length that is a multiple of 8." , )
parser.add_argument(
"--output_dir" , default="tf-tpu" , type=lowercase , help="Output directory where the TFRecord shards will be saved. If the"
" path is appended with `gs://` ('gs://tf-tpu', for example) then the TFRecord"
" shards will be directly saved to a Google Cloud Storage bucket." , )
_a = parser.parse_args()
return args
def tokenize_function ( tokenizer : List[Any] ) -> Tuple:
    def fn(examples : Optional[Any] ):
        return tokenizer(examples["text"] )
    return fn
def _lowerCamelCase ( lowercase : List[Any] ) -> Dict:
_a = []
for i in range(len(tokenized_data["input_ids"] ) ):
_a = {
"input_ids": tf.train.Feature(intaa_list=tf.train.IntaaList(value=tokenized_data["input_ids"][i] ) ),
"attention_mask": tf.train.Feature(
intaa_list=tf.train.IntaaList(value=tokenized_data["attention_mask"][i] ) ),
}
_a = tf.train.Features(feature=lowercase )
_a = tf.train.Example(features=lowercase )
_a = example.SerializeToString()
records.append(lowercase )
return records
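# Sketch (not part of the original script): shards written this way can be read back with
# tf.data.TFRecordDataset and decoded via tf.io.parse_single_example using a feature spec
# such as {"input_ids": tf.io.VarLenFeature(tf.int64), "attention_mask": tf.io.VarLenFeature(tf.int64)}.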
def _lowerCamelCase ( lowercase : Dict ) -> str:
_a = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
if args.limit is not None:
_a = min(len(lowercase ) , args.limit )
_a = dataset.select(range(lowercase ) )
print(F'Limiting the dataset to {args.limit} entries.' )
_a = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
# Handle output directory creation.
# For serializing into a Google Cloud Storage Bucket, one needs to first
# create a bucket.
if "gs" not in args.output_dir:
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
_a = os.path.join(args.output_dir , args.split )
if not os.path.exists(lowercase ):
os.makedirs(lowercase )
else:
_a = os.path.join(args.output_dir , args.split )
# Tokenize the whole dataset at once.
_a = tokenize_function(lowercase )
_a = dataset.map(lowercase , batched=lowercase , num_proc=4 , remove_columns=["text"] )
# We need to concatenate all our texts together, and then split the result
# into chunks of a fixed size, which we will call block_size. To do this, we
# will use the map method again, with the option batched=True. When we use batched=True,
# the function we pass to map() will be passed multiple inputs at once, allowing us
# to group them into more or fewer examples than we had in the input.
# This allows us to create our new fixed-length samples. The advantage of this
# method is that we don't lose a whole lot of content from the dataset compared to the
# case where we simply tokenize with a pre-defined max_length.
def group_texts(lowercase : Tuple ):
# Concatenate all texts.
_a = {k: sum(examples[k] , [] ) for k in examples.keys()}
_a = len(concatenated_examples[list(examples.keys() )[0]] )
# We drop the small remainder, though you could add padding instead if the model supports it
# In this, as in all things, we advise you to follow your heart 🫀
_a = (total_length // args.max_length) * args.max_length
# Split by chunks of max_len.
_a = {
k: [t[i : i + args.max_length] for i in range(0 , lowercase , args.max_length )]
for k, t in concatenated_examples.items()
}
return result
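# For intuition (illustrative numbers): with max_length=4, inputs [[1, 2, 3], [4, 5, 6, 7, 8, 9]]
# are concatenated to the 9 tokens 1..9, truncated to (9 // 4) * 4 = 8 tokens, and re-chunked
# into [[1, 2, 3, 4], [5, 6, 7, 8]]; token 9 is the dropped remainder.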
_a = dataset_tokenized.map(lowercase , batched=lowercase , batch_size=1000 , num_proc=4 )
_a = 0
_a = 0
for shard in range(0 , len(lowercase ) , args.shard_size ):
_a = grouped_dataset[shard : shard + args.shard_size]
_a = len(dataset_snapshot["input_ids"] )
_a = os.path.join(lowercase , F'dataset-{shard_count}-{records_containing}.tfrecord' )
_a = get_serialized_examples(lowercase )
with tf.io.TFRecordWriter(lowercase ) as out_file:
for i in range(len(lowercase ) ):
_a = serialized_examples[i]
out_file.write(lowercase )
print("Wrote file {} containing {} records".format(lowercase , lowercase ) )
shard_count += 1
total_records += records_containing
with open(F'split-{args.split}-records-count.txt' , "w" ) as f:
print(F'Total {args.split} records: {total_records}' , file=lowercase )
if __name__ == "__main__":
lowerCAmelCase_ : Optional[Any] = parse_args()
main(args)
| 63 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters ( model : Dict ) -> Any:
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
lowerCAmelCase_ : int = logging.getLogger(__name__)
def get_checkpoint_callback ( output_dir : List[Any] , metric : Any ) -> Any:
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            F'seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this'
            " function." )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=F'val_{metric}' , mode="max" , save_top_k=3 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback ( metric : Optional[int] , patience : Optional[int] ) -> Union[str, Any]:
    return EarlyStopping(
        monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=patience , verbose=True , )
class __SCREAMING_SNAKE_CASE (pl.Callback ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] , __a : str , __a : List[Any] ):
_a = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__a )
@rank_zero_only
def UpperCamelCase__ ( self : Optional[int] , __a : pl.Trainer , __a : pl.LightningModule , __a : str , __a : Tuple=True ):
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
_a = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
_a = Path(pl_module.hparams.output_dir )
if type_path == "test":
_a = od / "test_results.txt"
_a = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_a = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
_a = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=__a )
generations_file.parent.mkdir(exist_ok=__a )
with open(__a , "a+" ) as writer:
for key in sorted(__a ):
if key in ["log", "progress_bar", "preds"]:
continue
_a = metrics[key]
if isinstance(__a , torch.Tensor ):
_a = val.item()
_a = f'{key}: {val:.6f}\n'
writer.write(__a )
if not save_generations:
return
if "preds" in metrics:
_a = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(__a )
@rank_zero_only
def UpperCamelCase__ ( self : int , __a : List[Any] , __a : Union[str, Any] ):
try:
_a = pl_module.model.model.num_parameters()
except AttributeError:
_a = pl_module.model.num_parameters()
_a = count_trainable_parameters(__a )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def UpperCamelCase__ ( self : Union[str, Any] , __a : pl.Trainer , __a : pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(__a , __a , "test" )
@rank_zero_only
def UpperCamelCase__ ( self : Any , __a : pl.Trainer , __a : int ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 63 | 1 |
'''simple docstring'''
def _lowerCamelCase ( num : int ) -> bool:
    """
    Check whether an integer reads the same forwards and backwards in base 10.

    >>> _lowerCamelCase(121)
    True
    >>> _lowerCamelCase(-121)
    False
    >>> _lowerCamelCase(123)
    False
    """
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
'''simple docstring'''
import math
class SelfOrganizingMap :
    """simple docstring"""
    def get_winner( self : List[str] , weights : list[list[float]] , sample : list[int] ):
        # squared Euclidean distance of the sample to each of the two weight vectors
        da = 0.0
        db = 0.0
        for i in range(len(sample ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        # the unit whose weight vector is closer to the sample wins
        return 0 if da < db else 1
    def update( self : List[Any] , weights : list[list[int | float]] , sample : list[int] , j : int , alpha : float ):
        # pull every component of the winning unit's weight vector towards the sample
        for i in range(len(weights[j] ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
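# Worked example with the data in main() below: for sample [1, 1, 0, 0] and initial weights
# [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]], the squared distances are
# da = 0.64 + 0.16 + 0.25 + 0.81 = 1.86 and db = 0.04 + 0.36 + 0.49 + 0.09 = 0.98,
# so get_winner returns 1 and only weights[1] is pulled towards the sample.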
def main( ) -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(F'Clusters that the test sample belongs to : {winner}' )
    print(F'Weights that have been trained : {weights}' )
# running the main() function
if __name__ == "__main__":
main()
| 63 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import torch
import torchaudio
from datasets import load_dataset
from huggingface_hub import hf_hub_download
from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
def _lowerCamelCase ( lowercase : List[Any] ) -> str:
_a = ASTConfig()
if "10-10" in model_name:
pass
elif "speech-commands" in model_name:
_a = 128
elif "12-12" in model_name:
_a = 12
_a = 12
elif "14-14" in model_name:
_a = 14
_a = 14
elif "16-16" in model_name:
_a = 16
_a = 16
else:
raise ValueError("Model not supported" )
_a = "huggingface/label-files"
if "speech-commands" in model_name:
_a = 35
_a = "speech-commands-v2-id2label.json"
else:
_a = 527
_a = "audioset-id2label.json"
_a = json.load(open(hf_hub_download(lowercase , lowercase , repo_type="dataset" ) , "r" ) )
_a = {int(lowercase ): v for k, v in idalabel.items()}
_a = idalabel
_a = {v: k for k, v in idalabel.items()}
return config
def _lowerCamelCase ( lowercase : Dict ) -> Optional[Any]:
if "module.v" in name:
_a = name.replace("module.v" , "audio_spectrogram_transformer" )
if "cls_token" in name:
_a = name.replace("cls_token" , "embeddings.cls_token" )
if "dist_token" in name:
_a = name.replace("dist_token" , "embeddings.distillation_token" )
if "pos_embed" in name:
_a = name.replace("pos_embed" , "embeddings.position_embeddings" )
if "patch_embed.proj" in name:
_a = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
# transformer blocks
if "blocks" in name:
_a = name.replace("blocks" , "encoder.layer" )
if "attn.proj" in name:
_a = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
_a = name.replace("attn" , "attention.self" )
if "norm1" in name:
_a = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
_a = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
_a = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
_a = name.replace("mlp.fc2" , "output.dense" )
# final layernorm
if "audio_spectrogram_transformer.norm" in name:
_a = name.replace("audio_spectrogram_transformer.norm" , "audio_spectrogram_transformer.layernorm" )
# classifier head
if "module.mlp_head.0" in name:
_a = name.replace("module.mlp_head.0" , "classifier.layernorm" )
if "module.mlp_head.1" in name:
_a = name.replace("module.mlp_head.1" , "classifier.dense" )
return name
def _lowerCamelCase ( lowercase : Any , lowercase : Tuple ) -> List[str]:
for key in orig_state_dict.copy().keys():
_a = orig_state_dict.pop(lowercase )
if "qkv" in key:
_a = key.split("." )
_a = int(key_split[3] )
_a = config.hidden_size
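# the original checkpoint stores attention as one fused "qkv" tensor of shape
# (3 * hidden_size, hidden_size); the slices below split it into the separate
# query / key / value weights (and biases) expected by the HF layout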
if "weight" in key:
_a = val[:dim, :]
_a = val[dim : dim * 2, :]
_a = val[-dim:, :]
else:
_a = val[:dim]
_a = val[dim : dim * 2]
_a = val[-dim:]
else:
_a = val
return orig_state_dict
def _lowerCamelCase ( lowercase : Optional[int] ) -> Optional[int]:
_a = [
"module.v.head.weight",
"module.v.head.bias",
"module.v.head_dist.weight",
"module.v.head_dist.bias",
]
for k in ignore_keys:
state_dict.pop(lowercase , lowercase )
@torch.no_grad()
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : List[Any] , lowercase : Dict=False ) -> int:
_a = get_audio_spectrogram_transformer_config(lowercase )
_a = {
"ast-finetuned-audioset-10-10-0.4593": (
"https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.450": (
"https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448": (
"https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
),
"ast-finetuned-audioset-10-10-0.448-v2": (
"https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
),
"ast-finetuned-audioset-12-12-0.447": (
"https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
),
"ast-finetuned-audioset-14-14-0.443": (
"https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
),
"ast-finetuned-audioset-16-16-0.442": (
"https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
),
"ast-finetuned-speech-commands-v2": (
"https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
),
}
# load original state_dict
_a = model_name_to_url[model_name]
_a = torch.hub.load_state_dict_from_url(lowercase , map_location="cpu" )
# remove some keys
remove_keys(lowercase )
# rename some keys
_a = convert_state_dict(lowercase , lowercase )
# load 🤗 model
_a = ASTForAudioClassification(lowercase )
model.eval()
model.load_state_dict(lowercase )
# verify outputs on dummy input
# source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
_a = -4.2_67_73_93 if "speech-commands" not in model_name else -6.84_59_78
_a = 4.5_68_99_74 if "speech-commands" not in model_name else 5.5_65_45_26
_a = 1024 if "speech-commands" not in model_name else 128
_a = ASTFeatureExtractor(mean=lowercase , std=lowercase , max_length=lowercase )
if "speech-commands" in model_name:
_a = load_dataset("speech_commands" , "v0.02" , split="validation" )
_a = dataset[0]["audio"]["array"]
else:
_a = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" , )
_a , _a = torchaudio.load(lowercase )
_a = waveform.squeeze().numpy()
_a = feature_extractor(lowercase , sampling_rate=1_6000 , return_tensors="pt" )
# forward pass
_a = model(**lowercase )
_a = outputs.logits
if model_name == "ast-finetuned-audioset-10-10-0.4593":
_a = torch.tensor([-0.87_60, -7.00_42, -8.66_02] )
elif model_name == "ast-finetuned-audioset-10-10-0.450":
_a = torch.tensor([-1.19_86, -7.09_03, -8.27_18] )
elif model_name == "ast-finetuned-audioset-10-10-0.448":
_a = torch.tensor([-2.61_28, -8.00_80, -9.43_44] )
elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
_a = torch.tensor([-1.50_80, -7.45_34, -8.89_17] )
elif model_name == "ast-finetuned-audioset-12-12-0.447":
_a = torch.tensor([-0.50_50, -6.58_33, -8.08_43] )
elif model_name == "ast-finetuned-audioset-14-14-0.443":
_a = torch.tensor([-0.38_26, -7.03_36, -8.24_13] )
elif model_name == "ast-finetuned-audioset-16-16-0.442":
_a = torch.tensor([-1.21_13, -6.91_01, -8.34_70] )
elif model_name == "ast-finetuned-speech-commands-v2":
_a = torch.tensor([6.15_89, -8.05_66, -8.79_84] )
else:
raise ValueError("Unknown model name" )
if not torch.allclose(logits[0, :3] , lowercase , atol=1E-4 ):
raise ValueError("Logits don't match" )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(lowercase ).mkdir(exist_ok=lowercase )
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowercase )
print(F'Saving feature extractor to {pytorch_dump_folder_path}' )
feature_extractor.save_pretrained(lowercase )
if push_to_hub:
print("Pushing model and feature extractor to the hub..." )
model.push_to_hub(F'MIT/{model_name}' )
feature_extractor.push_to_hub(F'MIT/{model_name}' )
if __name__ == "__main__":
lowerCAmelCase_ : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='ast-finetuned-audioset-10-10-0.4593',
type=str,
help='Name of the Audio Spectrogram Transformer model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
lowerCAmelCase_ : Dict = parser.parse_args()
convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 63 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['image_processor', 'tokenizer']
__a ='OwlViTImageProcessor'
__a =('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[Any] , __a : str=None , __a : List[str]=None , **__a : List[Any] ):
_a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : Union[str, Any] , __a : Any=None , __a : List[str]=None , __a : int=None , __a : Optional[int]="max_length" , __a : List[str]="np" , **__a : Any ):
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(__a , __a ) or (isinstance(__a , __a ) and not isinstance(text[0] , __a )):
_a = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )]
elif isinstance(__a , __a ) and isinstance(text[0] , __a ):
_a = []
# Maximum number of queries across batch
_a = max([len(__a ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__a ) != max_num_queries:
_a = t + [" "] * (max_num_queries - len(__a ))
_a = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )
encodings.append(__a )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
_a = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_a = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_a = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_a = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_a = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
_a = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_a = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
_a = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
_a = BatchEncoding()
_a = input_ids
_a = attention_mask
if query_images is not None:
_a = BatchEncoding()
_a = self.image_processor(
__a , return_tensors=__a , **__a ).pixel_values
_a = query_pixel_values
if images is not None:
_a = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
_a = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_a = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def UpperCamelCase__ ( self : List[str] , *__a : Union[str, Any] , **__a : int ):
return self.image_processor.post_process(*__a , **__a )
def UpperCamelCase__ ( self : Optional[int] , *__a : Optional[Any] , **__a : List[str] ):
return self.image_processor.post_process_object_detection(*__a , **__a )
def UpperCamelCase__ ( self : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ):
return self.image_processor.post_process_image_guided_detection(*__a , **__a )
def UpperCamelCase__ ( self : str , *__a : Tuple , **__a : Tuple ):
return self.tokenizer.batch_decode(*__a , **__a )
def UpperCamelCase__ ( self : List[str] , *__a : List[Any] , **__a : Optional[int] ):
return self.tokenizer.decode(*__a , **__a )
@property
def UpperCamelCase__ ( self : List[str] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self : str ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
| 63 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node :
    """simple docstring"""
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree ( ) -> Node | None:
    tree = Node(1 )
    tree.left = Node(2 )
    tree.right = Node(3 )
    tree.left.left = Node(4 )
    tree.left.right = Node(5 )
    return tree
def preorder ( root : Node | None ) -> list[int]:
    return [root.data, *preorder(root.left ), *preorder(root.right )] if root else []
def postorder ( root : Node | None ) -> list[int]:
    return postorder(root.left ) + postorder(root.right ) + [root.data] if root else []
def inorder ( root : Node | None ) -> list[int]:
    return [*inorder(root.left ), root.data, *inorder(root.right )] if root else []
def height ( root : Node | None ) -> int:
    return (max(height(root.left ) , height(root.right ) ) + 1) if root else 0
def level_order ( root : Node | None ) -> Sequence[Node | None]:
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root] )
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data )
        if node.left:
            process_queue.append(node.left )
        if node.right:
            process_queue.append(node.right )
    return output
def get_nodes_from_left_to_right ( root : Node | None , level : int ) -> Sequence[Node | None]:
    output: list[Any] = []
    def populate_output(root : Node | None , level : int ) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.left , level - 1 )
            populate_output(root.right , level - 1 )
    populate_output(root , level )
    return output
def get_nodes_from_right_to_left ( root : Node | None , level : int ) -> Sequence[Node | None]:
    output: list[Any] = []
    def populate_output(root : Node | None , level : int ) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data )
        elif level > 1:
            populate_output(root.right , level - 1 )
            populate_output(root.left , level - 1 )
    populate_output(root , level )
    return output
def zigzag ( root : Node | None ) -> Sequence[Node | None] | list[Any]:
    if root is None:
        return []
    output: list[Any] = []
    flag = 0
    height_tree = height(root )
    for h in range(1 , height_tree + 1 ):
        if not flag:
            output.append(get_nodes_from_left_to_right(root , h ) )
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root , h ) )
            flag = 0
    return output
def main ( ) -> None: # Main function for testing.
    root = make_tree()
    print(F'In-order Traversal: {inorder(root )}' )
    print(F'Pre-order Traversal: {preorder(root )}' )
    print(F'Post-order Traversal: {postorder(root )}' , "\n" )
    print(F'Height of Tree: {height(root )}' , "\n" )
    print("Complete Level Order Traversal: " )
    print(level_order(root ) , "\n" )
    print("Level-wise order Traversal: " )
    for level in range(1 , height(root ) + 1 ):
        print(F'Level {level}:' , get_nodes_from_left_to_right(root , level=level ) )
    print("\nZigZag order Traversal: " )
    print(zigzag(root ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
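# Expected console output for the sample tree built by make_tree() -- 1 at the root,
# 2 and 3 as its children, 4 and 5 under 2 (verified by hand):
#   In-order  : [4, 2, 5, 1, 3]
#   Pre-order : [1, 2, 4, 5, 3]
#   Post-order: [4, 5, 2, 3, 1]
#   ZigZag    : [[1], [3, 2], [4, 5]]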
| 63 |
'''simple docstring'''
def harmonic_series ( n_term : str ) -> list:
    """
    Build the first n terms of the harmonic series as strings.

    >>> harmonic_series("3")
    ['1', '1/2', '1/3']
    >>> harmonic_series("")
    []
    """
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term ) ):
        series.append(F'1/{temp + 1}' if series else "1" )
    return series
if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
| 63 | 1 |
'''simple docstring'''
from __future__ import annotations
from math import pow, sqrt
def _lowerCamelCase ( resistance : float , reactance : float , impedance : float ) -> dict[str, float]:
    """
    Given any two of resistance, reactance and impedance (the third passed as 0),
    solve for the missing quantity via the right triangle Z^2 = R^2 + X^2.

    >>> _lowerCamelCase(3, 4, 0)
    {'impedance': 5.0}
    >>> _lowerCamelCase(0, 4, 5)
    {'resistance': 3.0}
    >>> _lowerCamelCase(3, 0, 5)
    {'reactance': 4.0}
    """
if (resistance, reactance, impedance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance == 0:
return {"resistance": sqrt(pow(lowercase , 2 ) - pow(lowercase , 2 ) )}
elif reactance == 0:
return {"reactance": sqrt(pow(lowercase , 2 ) - pow(lowercase , 2 ) )}
elif impedance == 0:
return {"impedance": sqrt(pow(lowercase , 2 ) + pow(lowercase , 2 ) )}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!")
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.")
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.")
    parser.add_argument(
        "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.")
    parser.add_argument(
        "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local")
    parser.add_argument(
        "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.")
    parser.add_argument(
        "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)")
    parser.add_argument(
        "--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.")
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.")
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.")
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.")
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py")
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.")
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args: argparse.Namespace):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local.")
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list: list) -> int:
    # Shard filenames encode their sample count as "...-<shard>-<num_samples>.tfrecord".
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
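# Example (a sketch; the filename is hypothetical): a shard named
# "train-00003-01024.tfrecord" matches the pattern above and contributes 1024 samples.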
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(records))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args: argparse.Namespace):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f"No .tfrecord files found in {args.train_dataset}.")
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f"No .tfrecord files found in {args.eval_dataset}.")
    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate)
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf")

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask)
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size)
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False)
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))
    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks)
    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
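# Example invocation (a sketch; bucket paths and the script filename are placeholders):
#   python run_mlm_tpu.py --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train --eval_dataset gs://my-bucket/eval \
#       --output_dir ./mlm_checkpoints --bfloat16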
| 63 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    """simple docstring"""
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "neck_hidden_sizes"))
        self.parent.assertTrue(hasattr(config, "num_attention_heads"))
class MobileViTModelTester:
    """simple docstring"""
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, last_hidden_size=640, num_attention_heads=4, hidden_act="silu", conv_kernel_size=3, output_stride=32, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, classifier_dropout_prob=0.1, initializer_range=0.02, is_training=True, use_labels=True, num_labels=10, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return MobileViTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, num_attention_heads=self.num_attention_heads, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range)
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ))
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ))
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """simple docstring"""
    all_model_classes = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp(self):
        self.model_tester = MobileViTModelTester(self)
        self.config_tester = MobileViTConfigTester(self, config_class=MobileViTConfig, has_text_modality=False)
    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViT does not use inputs_embeds" )
def UpperCamelCase__ ( self : Any ):
pass
@unittest.skip(reason="MobileViT does not support input and output embeddings" )
def UpperCamelCase__ ( self : Any ):
pass
@unittest.skip(reason="MobileViT does not output attentions" )
def UpperCamelCase__ ( self : List[Any] ):
pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def UpperCamelCase__ ( self : Union[str, Any] ):
pass
def UpperCamelCase__ ( self : Dict ):
_a = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            hidden_states = outputs.hidden_states
            expected_num_stages = 5
            self.assertEqual(len(hidden_states), expected_num_stages)
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            divisor = 2
            for i in range(len(hidden_states)):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:]), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor])
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride, divisor // 2)
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)
    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class MobileViTModelIntegrationTest(unittest.TestCase):
    """simple docstring"""
    @cached_property
    def default_image_processor(self):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small") if is_vision_available() else None
    @slow
    def test_inference_image_classification_head(self):
        model = MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.9364, -1.2327, -0.4653]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    def test_inference_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 21, 32, 32))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [
                [[6.9713, 6.9786, 7.2422], [7.2893, 7.2825, 7.4446], [7.6580, 7.8797, 7.9420]],
                [[-10.6869, -10.3250, -10.3471], [-10.4228, -9.9868, -9.7132], [-11.0405, -11.0221, -10.7318]],
                [[-3.3089, -2.8539, -2.6740], [-3.2706, -2.5621, -2.5108], [-3.2534, -2.6615, -2.6651]],
            ], device=torch_device)
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
    @slow
    def test_post_processing_semantic_segmentation(self):
        model = MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        model = model.to(torch_device)
        image_processor = MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small")
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        outputs.logits = outputs.logits.detach().cpu()
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(50, 60)])
        expected_shape = torch.Size((50, 60))
        self.assertEqual(segmentation[0].shape, expected_shape)
        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((32, 32))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 63 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    """simple docstring"""
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning)
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}")
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning)
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning)
        return self.image_processor
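# Usage sketch (assumes the image processor's default apply_ocr=True, so words and
# boxes come from OCR rather than being passed in; the variable names are illustrative):
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   encoding = processor(document_image, return_tensors="pt")
#   # encoding holds input_ids, bbox, attention_mask and pixel_values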
| 63 | 1 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of the first n naturals."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
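# Worked check: for n = 10 the sum is 55 and the sum of squares is 385,
# so solution(10) == 55**2 - 385 == 2640.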
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63 |
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig:
    """simple docstring"""
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 63 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """simple docstring"""
    def __init__(self, components: Collection[float] | None = None):
        if components is None:
            components = []
        self.__components = list(components)
    def __len__(self) -> int:
        return len(self.__components)
    def __str__(self) -> str:
        return "(" + ",".join(map(str, self.__components)) + ")"
    def __add__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")
    def __sub__(self, other: Vector) -> Vector:
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")
    @overload
    def __mul__(self, other: float) -> Vector:
        ...
    @overload
    def __mul__(self, other: Vector) -> float:
        ...
    def __mul__(self, other: float | Vector) -> float | Vector:
        if isinstance(other, (float, int)):
            ans = [c * other for c in self.__components]
            return Vector(ans)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")
    def copy(self) -> Vector:
        return Vector(self.__components)
    def component(self, i: int) -> float:
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")
    def change_component(self, pos: int, value: float) -> None:
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value
    def euclidean_length(self) -> float:
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))
    def angle(self, other: Vector, deg: bool = False) -> float:
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    # returns a zero vector of the given dimension
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension: int, pos: int) -> Vector:
    # returns a unit basis vector with a one at index 'pos'
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    # computes the axpy operation: scalar * x + y
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def random_vector(n: int, a: int, b: int) -> Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix:
    """simple docstring"""
    def __init__(self, matrix: list[list[float]], w: int, h: int):
        self.__matrix = matrix
        self.__width = w
        self.__height = h
    def __str__(self) -> str:
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans
    def __add__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")
    def __sub__(self, other: Matrix) -> Matrix:
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")
    @overload
    def __mul__(self, other: float) -> Matrix:
        ...
    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...
    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!")
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        return None
    def height(self) -> int:
        return self.__height
    def width(self) -> int:
        return self.__width
    def component(self, x: int, y: int) -> float:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")
    def change_component(self, x: int, y: int, value: float) -> None:
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")
    def minor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()
    def cofactor(self, x: int, y: int) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")
    def determinant(self) -> float:
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
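# Usage sketch for the classes above:
#   v = Vector([1, 2, 3])
#   w = Vector([4, 5, 6])
#   print(v + w)             # (5,7,9)
#   print(v * w)             # 32, the dot product
#   m = square_zero_matrix(2)
#   print(m.determinant())   # 0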
| 63 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of the first n naturals."""
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1, n + 1):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_canine': ['CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CanineConfig'],
'tokenization_canine': ['CanineTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_canine"] = [
'CANINE_PRETRAINED_MODEL_ARCHIVE_LIST',
'CanineForMultipleChoice',
'CanineForQuestionAnswering',
'CanineForSequenceClassification',
'CanineForTokenClassification',
'CanineLayer',
'CanineModel',
'CaninePreTrainedModel',
'load_tf_weights_in_canine',
]
if TYPE_CHECKING:
from .configuration_canine import CANINE_PRETRAINED_CONFIG_ARCHIVE_MAP, CanineConfig
from .tokenization_canine import CanineTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_canine import (
CANINE_PRETRAINED_MODEL_ARCHIVE_LIST,
CanineForMultipleChoice,
CanineForQuestionAnswering,
CanineForSequenceClassification,
CanineForTokenClassification,
CanineLayer,
CanineModel,
CaninePreTrainedModel,
load_tf_weights_in_canine,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 63 |
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
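# Quick examples: is_palindrome(121) -> True, is_palindrome(-121) -> False,
# is_palindrome(10) -> False.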
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
SPIECE_UNDERLINE = '▁'
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/mbart-large-50-one-to-many-mmt': (
            'https://huggingface.co/facebook/mbart-large-50-one-to-many-mmt/resolve/main/sentencepiece.bpe.model'
        ),
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/mbart-large-50-one-to-many-mmt': 1024,
}
# fmt: off
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN', 'af_ZA', 'az_AZ', 'bn_IN', 'fa_IR', 'he_IL', 'hr_HR', 'id_ID', 'ka_GE', 'km_KH', 'mk_MK', 'ml_IN', 'mn_MN', 'mr_IN', 'pl_PL', 'ps_AF', 'pt_XX', 'sv_SE', 'sw_KE', 'ta_IN', 'te_IN', 'th_TH', 'tl_XX', 'uk_UA', 'ur_PK', 'xh_ZA', 'gl_ES', 'sl_SI']
class MBart50Tokenizer(PreTrainedTokenizer):
    """simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []
    def __init__(self, vocab_file, src_lang=None, tgt_lang=None, eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs):
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        kwargs["additional_special_tokens"] = kwargs.get("additional_special_tokens", [])
        kwargs["additional_special_tokens"] += [
            code for code in FAIRSEQ_LANGUAGE_CODES if code not in kwargs["additional_special_tokens"]
        ]
        super().__init__(
            src_lang=src_lang, tgt_lang=tgt_lang, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._src_lang = src_lang if src_lang is not None else "en_XX"
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)
    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token
    @property
    def src_lang(self) -> str:
        return self._src_lang
    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)
    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token: str) -> int:
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index: int) -> str:
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens
    def _build_translation_inputs(self, raw_inputs, return_tensors: str, src_lang: Optional[str], tgt_lang: Optional[str], **extra_kwargs):
        if src_lang is None or tgt_lang is None:
            raise ValueError("Translation requires a `src_lang` and a `tgt_lang` for this model")
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs["forced_bos_token_id"] = tgt_lang_id
        return inputs
    def prepare_seq2seq_batch(self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)
    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)
    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)
    def set_src_lang_special_tokens(self, src_lang: str) -> None:
        """Reset the special tokens to the source lang setting: prefix=[src_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[src_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Reset the special tokens to the target language setting: prefix=[tgt_lang_code] and suffix=[eos]."""
        self.cur_lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [self.cur_lang_code_id]
        self.suffix_tokens = [self.eos_token_id]
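# Usage sketch for translation preprocessing (the checkpoint name is the one the
# constants above point at; the sentence is illustrative):
#   tokenizer = MBart50Tokenizer.from_pretrained(
#       "facebook/mbart-large-50-one-to-many-mmt", src_lang="en_XX", tgt_lang="ro_RO")
#   inputs = tokenizer("UN Chief Says There Is No Military Solution in Syria", return_tensors="pt")
#   # input_ids start with the en_XX language code and end with </s>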
| 63 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_gpt_neox_fast"] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox"] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 63 | 1 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    """simple docstring"""
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)
    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)
    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold)
    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            # if t > 0:
            #     noise = self.dummy_sample_deter
            #     variance = scheduler.get_variance(t) ** (0.5) * noise
            #
            #     sample = pred_prev_sample + variance
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}"):
            scheduler.set_timesteps(timesteps=timesteps)
| 63 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
    CONFIG_MAPPING,
    FEATURE_EXTRACTOR_MAPPING,
    AutoConfig,
    AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('fixtures')
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SAMPLE_CONFIG = get_tests_dir('fixtures/dummy-config.json')
class AutoFeatureExtractorTest(unittest.TestCase):
    """simple docstring"""
    def setUp(self):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_feature_extractor_from_model_shortcut(self):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h")
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_key(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_directory_from_config(self):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR).to_dict()
            config_dict.pop("feature_extractor_type")
            config = Wav2Vec2FeatureExtractor(**config_dict)
            # save in new folder
            model_config.save_pretrained(tmpdirname)
            config.save_pretrained(tmpdirname)
            feature_extractor = AutoFeatureExtractor.from_pretrained(tmpdirname)
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string())
            self.assertTrue("_processor_class" not in dict_as_saved)
        self.assertIsInstance(feature_extractor, Wav2Vec2FeatureExtractor)
    def test_feature_extractor_from_local_file(self):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG)
        self.assertIsInstance(config, Wav2Vec2FeatureExtractor)
    def test_repo_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "bert-base is not a local folder and is not a valid model identifier"):
            _ = AutoFeatureExtractor.from_pretrained("bert-base")
    def test_revision_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)"):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, revision="aaaaaa")
    def test_feature_extractor_not_found(self):
        with self.assertRaisesRegex(
            EnvironmentError, "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json."):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model")
    def test_from_pretrained_dynamic_feature_extractor(self):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor")
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False)
        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True)
        self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir)
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir, trust_remote_code=True)
        self.assertEqual(reloaded_feature_extractor.__class__.__name__, "NewFeatureExtractor")
    def test_new_feature_extractor_registration(self):
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, CustomFeatureExtractor)
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError):
                AutoFeatureExtractor.register(Wav2Vec2Config, Wav2Vec2FeatureExtractor)
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR)
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir)
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir)
                self.assertIsInstance(new_feature_extractor, CustomFeatureExtractor)
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict(self):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor):
            is_local = True
        try:
            AutoConfig.register("custom", CustomConfig)
            AutoFeatureExtractor.register(CustomConfig, NewFeatureExtractor)
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor")
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=False)
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(feature_extractor.is_local)
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor", trust_remote_code=True)
            self.assertEqual(feature_extractor.__class__.__name__, "NewFeatureExtractor")
            self.assertTrue(not hasattr(feature_extractor, "is_local"))
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 63 | 1 |
'''simple docstring'''
from math import factorial
DIGIT_FACTORIAL = {str(d): factorial(d) for d in range(10)}
def sum_of_digit_factorial(n: int) -> int:
    return sum(DIGIT_FACTORIAL[d] for d in str(n))
def solution() -> int:
    limit = 7 * factorial(9) + 1
    return sum(i for i in range(3, limit) if sum_of_digit_factorial(i) == i)
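# Why 7 * 9! + 1 bounds the search (Project Euler 34): an n-digit number is at
# least 10**(n-1), but its digit-factorial sum is at most n * 9!, and for n >= 8
# 10**(n-1) > n * 9! (10**7 = 10,000,000 > 8 * 362,880), so no solution has 8+ digits.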
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    """simple docstring"""
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__(self, vocab_size=50257, n_positions=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50256, eos_token_id=50256, attention_softmax_in_fp32=True, scale_attention_softmax_in_fp32=True, multi_query=True, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
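# Usage sketch: attribute_map lets the generic config names resolve to the
# GPT-2-style ones defined above.
#   config = GPTBigCodeConfig(n_embd=1024, n_layer=8)
#   assert config.hidden_size == 1024 and config.num_hidden_layers == 8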
| 63 | 1 |
'''simple docstring'''
from dataclasses import dataclass, field
from typing import Tuple
from ..utils import cached_property, is_torch_available, is_torch_tpu_available, logging, requires_backends
from .benchmark_args_utils import BenchmarkArguments
if is_torch_available():
import torch
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
logger = logging.get_logger(__name__)
@dataclass
class PyTorchBenchmarkArguments(BenchmarkArguments):
    """simple docstring"""
    deprecated_args = [
        'no_inference',
        'no_cuda',
        'no_tpu',
        'no_speed',
        'no_memory',
        'no_env_print',
        'no_multi_process',
    ]
    def __init__(self, **kwargs):
        for deprecated_arg in self.deprecated_args:
            if deprecated_arg in kwargs:
                positive_arg = deprecated_arg[3:]
                setattr(self, positive_arg, not kwargs.pop(deprecated_arg))
                logger.warning(
                    f'{deprecated_arg} is depreciated. Please use --no_{positive_arg} or'
                    f' {positive_arg}={kwargs[positive_arg]}')
        self.torchscript = kwargs.pop("torchscript", self.torchscript)
        self.torch_xla_tpu_print_metrics = kwargs.pop("torch_xla_tpu_print_metrics", self.torch_xla_tpu_print_metrics)
        self.fp16_opt_level = kwargs.pop("fp16_opt_level", self.fp16_opt_level)
        super().__init__(**kwargs)
    torchscript: bool = field(default=False, metadata={'help': 'Trace the models using torchscript'})
    torch_xla_tpu_print_metrics: bool = field(default=False, metadata={'help': 'Print Xla/PyTorch tpu metrics'})
    fp16_opt_level: str = field(
        default='O1', metadata={
            'help': (
                'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
                'See details at https://nvidia.github.io/apex/amp.html'
            )
        })
@cached_property
def UpperCamelCase__ ( self : Any ):
requires_backends(self , ["torch"] )
logger.info("PyTorch: setting up devices" )
if not self.cuda:
_a = torch.device("cpu" )
_a = 0
elif is_torch_tpu_available():
_a = xm.xla_device()
_a = 0
else:
_a = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
_a = torch.cuda.device_count()
return device, n_gpu
@property
def UpperCamelCase__ ( self : int ):
return is_torch_tpu_available() and self.tpu
@property
def UpperCamelCase__ ( self : Optional[Any] ):
requires_backends(self , ["torch"] )
# TODO(PVP): currently only single GPU is supported
return torch.cuda.current_device()
@property
def UpperCamelCase__ ( self : Optional[int] ):
requires_backends(self , ["torch"] )
return self._setup_devices[0]
@property
def UpperCamelCase__ ( self : Union[str, Any] ):
requires_backends(self , ["torch"] )
return self._setup_devices[1]
@property
def UpperCamelCase__ ( self : Dict ):
return self.n_gpu > 0
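The `__init__` above exists only to translate deprecated `no_*` flags into their positive counterparts. A standalone sketch of that inversion (names illustrative, not part of the file):

def convert_deprecated(kwargs, deprecated_args):
    out = {}
    for arg in deprecated_args:
        if arg in kwargs:
            positive = arg[3:]              # strip the "no_" prefix
            out[positive] = not kwargs.pop(arg)
    out.update(kwargs)
    return out

assert convert_deprecated({"no_cuda": True, "batch": 8}, ["no_cuda"]) == {"cuda": False, "batch": 8}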
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    """Return True if n is a perfect cube, e.g. 27 == 3 ** 3."""
    # Round the floating-point cube root: 27 ** (1 / 3) is 3.0000000000000004
    # on most platforms, so an unrounded comparison would wrongly reject 27.
    val = round(abs(n) ** (1 / 3))
    return (val * val * val) == abs(n)
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
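An exact integer alternative (my sketch, not from the original): binary-search the cube root to avoid floating point entirely.

def perfect_cube_exact(n: int) -> bool:
    if n < 0:
        return perfect_cube_exact(-n)
    lo, hi = 0, max(1, n)
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid ** 3
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False

assert perfect_cube_exact(27) and not perfect_cube_exact(4)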
'''simple docstring'''
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    """BuilderConfig for Pandas."""

    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles"""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        for i, file in enumerate(itertools.chain.from_iterable(files)):
            with open(file, "rb") as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f))
            yield i, self._cast_table(pa_table)
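This builder is what backs `load_dataset("pandas", ...)` for pickled DataFrames. A hedged end-to-end sketch (file names illustrative):

import pandas as pd
from datasets import load_dataset

pd.DataFrame({"col": [1, 2, 3]}).to_pickle("train.pkl")
ds = load_dataset("pandas", data_files={"train": "train.pkl"})
assert ds["train"].num_rows == 3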
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
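The nested-backbone pattern above serializes the sub-config through `to_dict()`. A hedged sketch, assuming a transformers version that ships DETA:

from transformers import DetaConfig

config = DetaConfig()                       # defaults to a ResNet backbone
d = config.to_dict()
assert d["backbone_config"]["model_type"] == "resnet"
assert d["model_type"] == "deta"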
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params

logger = getLogger(__name__)

DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


def generate_summaries_or_translations(
    examples: List[str],
    out_file: str,
    model_name: str,
    batch_size: int = 8,
    device: str = DEFAULT_DEVICE,
    fp16=False,
    task="summarization",
    prefix=None,
    **generate_kwargs,
) -> Dict:
    """Save model.generate results to <out_file>, and return how long it took."""
    fout = Path(out_file).open("w", encoding="utf-8")
    model_name = str(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name).to(device)
    if fp16:
        model = model.half()

    tokenizer = AutoTokenizer.from_pretrained(model_name)
    logger.info(f"Inferred tokenizer type: {tokenizer.__class__}")  # if this is wrong, check config.model_type.

    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model, task)
    if prefix is None:
        prefix = prefix or getattr(model.config, "prefix", "") or ""
    for examples_chunk in tqdm(list(chunks(examples, batch_size))):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk, return_tensors="pt", truncation=True, padding="longest").to(device)
        summaries = model.generate(
            input_ids=batch.input_ids,
            attention_mask=batch.attention_mask,
            **generate_kwargs,
        )
        dec = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False)
        for hypothesis in dec:
            fout.write(hypothesis + "\n")
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time)  # seconds
    n_obs = len(examples)
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs, 4)}


def datetime_now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def run_generate(verbose=True):
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name", type=str, help="like facebook/bart-large-cnn,t5-base, etc.")
    parser.add_argument("input_path", type=str, help="like cnn_dm/test.source")
    parser.add_argument("save_path", type=str, help="where to save summaries")
    parser.add_argument("--reference_path", type=str, required=False, help="like cnn_dm/test.target")
    parser.add_argument("--score_path", type=str, required=False, default="metrics.json", help="where to save metrics")
    parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.")
    parser.add_argument(
        "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples"
    )
    parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics")
    parser.add_argument("--bs", type=int, default=8, required=False, help="batch size")
    parser.add_argument(
        "--n_obs", type=int, default=-1, required=False, help="How many observations. Defaults to all."
    )
    parser.add_argument("--fp16", action="store_true")
    parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results")
    parser.add_argument(
        "--info",
        nargs="?",
        type=str,
        const=datetime_now(),
        help=(
            "use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
            " lang=en-ru. If no value is passed, the current datetime string will be used."
        ),
    )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args, rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest)
    if parsed_args and verbose:
        print(f"parsed the following generate kwargs: {parsed_args}")
    examples = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path).parent.mkdir(exist_ok=True)
    if args.reference_path is None and Path(args.score_path).exists():
        warnings.warn(f"score_path {args.score_path} will be overwritten unless you type ctrl-c.")
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError("Can't mix --fp16 and --device cpu")
    runtime_metrics = generate_summaries_or_translations(
        examples,
        args.save_path,
        args.model_name,
        batch_size=args.bs,
        device=args.device,
        fp16=args.fp16,
        task=args.task,
        prefix=args.prefix,
        **parsed_args,
    )

    if args.reference_path is None:
        return {}

    # Compute scores
    score_fn = calculate_bleu if "translation" in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path).readlines()][: len(output_lns)]
    scores: dict = score_fn(output_lns, reference_lns)
    scores.update(runtime_metrics)

    if args.dump_args:
        scores.update(parsed_args)
    if args.info:
        scores["info"] = args.info

    if verbose:
        print(scores)

    if args.score_path is not None:
        json.dump(scores, open(args.score_path, "w"))

    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
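The batching above leans on a `chunks` helper from the sibling `utils` module; a minimal equivalent looks like this (my sketch):

def chunks(lst, n):
    """Yield successive n-sized chunks from lst."""
    for i in range(0, len(lst), n):
        yield lst[i : i + n]

assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]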
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
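Why cache sequence lengths at all: a length-grouped sampler can then batch similar-length examples to cut padding. Illustrative sketch only, not the repo's sampler:

def batch_by_length(lengths, batch_size):
    order = sorted(range(len(lengths)), key=lengths.__getitem__)
    return [order[i : i + batch_size] for i in range(0, len(order), batch_size)]

assert batch_by_length([5, 100, 7, 98], 2) == [[0, 2], [3, 1]]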
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def _lowerCamelCase ( lowercase : str , lowercase : str , lowercase : str , lowercase : PreTrainedTokenizer , lowercase : int , lowercase : Optional[int] = None , ) -> List[str]:
_a = {}
if train_file is not None:
_a = [train_file]
if eval_file is not None:
_a = [eval_file]
if test_file is not None:
_a = [test_file]
_a = datasets.load_dataset("csv" , data_files=lowercase )
_a = list(ds[list(files.keys() )[0]].features.keys() )
_a = features_name.pop(lowercase )
_a = list(set(ds[list(files.keys() )[0]][label_name] ) )
_a = {label: i for i, label in enumerate(lowercase )}
_a = tokenizer.model_input_names
_a = {}
if len(lowercase ) == 1:
for k in files.keys():
_a = ds[k].map(
lambda lowercase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=lowercase , max_length=lowercase , padding="max_length" ) , batched=lowercase , )
elif len(lowercase ) == 2:
for k in files.keys():
_a = ds[k].map(
lambda lowercase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=lowercase , max_length=lowercase , padding="max_length" , ) , batched=lowercase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
_a = {k: v for k, v in ex.items() if k in input_names}
_a = labelaid[ex[label_name]]
yield (d, label)
_a = (
tf.data.Dataset.from_generator(
lowercase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
_a = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
_a = (
tf.data.Dataset.from_generator(
lowercase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
_a = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
_a = (
tf.data.Dataset.from_generator(
lowercase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
_a = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
logger = logging.getLogger(__name__)
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =field(metadata={'help': 'Which column contains the label'} )
__a =field(default=lowerCamelCase_ , metadata={'help': 'The path of the training file'} )
__a =field(default=lowerCamelCase_ , metadata={'help': 'The path of the development file'} )
__a =field(default=lowerCamelCase_ , metadata={'help': 'The path of the test file'} )
__a =field(
default=128 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
__a =field(
default=lowerCamelCase_ , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
__a =field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__a =field(
default=lowerCamelCase_ , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__a =field(
default=lowerCamelCase_ , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__a =field(default=lowerCamelCase_ , metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__a =field(
default=lowerCamelCase_ , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_a = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
_a , _a , _a = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.info(
F'n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '
F'16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_a = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_a , _a , _a , _a = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=lowercase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
_a = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(lowercase ) , labelaid=lowercase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="text-classification" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
_a = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(".bin" in model_args.model_name_or_path ) , config=lowercase , cache_dir=model_args.cache_dir , )
def compute_metrics(lowercase : EvalPrediction ) -> Dict:
_a = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
_a = TFTrainer(
model=lowercase , args=lowercase , train_dataset=lowercase , eval_dataset=lowercase , compute_metrics=lowercase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
_a = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
_a = trainer.evaluate()
_a = os.path.join(training_args.output_dir , "eval_results.txt" )
with open(lowercase , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in result.items():
logger.info(F' {key} = {value}' )
writer.write(F'{key} = {value}\n' )
results.update(lowercase )
return results
if __name__ == "__main__":
main()
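Standalone sketch of the `tf.data.Dataset.from_generator` pattern this script relies on, with explicit dtypes for token inputs and integer labels (values illustrative):

import tensorflow as tf

def gen():
    yield {"input_ids": [101, 2023, 102]}, 1

ds = tf.data.Dataset.from_generator(
    gen,
    ({"input_ids": tf.int32}, tf.int64),
    ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
)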
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
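A simplified re-implementation of the rule these tests encode (my sketch, not the diffusers source, and it ignores the variant handling): every `.bin` weight must have a `.safetensors` sibling.

def naive_is_safetensors_compatible(filenames):
    def stem(name):
        # "pytorch_model" and "model" stems are treated as equivalent,
        # mirroring the transformers naming convention.
        return name.replace("pytorch_model", "model").rsplit(".", 1)[0]

    bins = {stem(f) for f in filenames if f.endswith(".bin")}
    safes = {stem(f) for f in filenames if f.endswith(".safetensors")}
    return bins <= safes

assert naive_is_safetensors_compatible(
    ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
)
assert not naive_is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])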
'''simple docstring'''
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp `img` with the affine transform that maps the three points pt1 onto pt2."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    # (point pairings reconstructed; any pair of 3-point arrays works here)
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
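`cv2.getAffineTransform` solves for the 2x3 matrix M mapping three source points to three destination points; the same matrix can be computed by hand (numpy-only sketch):

import numpy as np

src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
A = np.hstack([src, np.ones((3, 1), np.float32)])  # rows of [x, y, 1]
M = np.linalg.solve(A, dst).T                      # 2x3 affine matrix, dst = M @ [x, y, 1]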
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    """Encode raw bytes as an uppercase base16 (hex) string."""
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode an uppercase base16 (hex) string back to bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
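Round-trip sketch using the two helpers above, cross-checked against the standard library:

import base64

encoded = base16_encode(b"Hello")   # '48656C6C6F'
assert base16_decode(encoded) == b"Hello"
assert base64.b16encode(b"Hello").decode() == encoded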
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    """Return the first n terms of the harmonic series as strings: 1, 1/2, 1/3, ..."""
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
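A companion sketch (my addition): the n-th harmonic number, i.e. the exact sum of those terms, computed with rationals.

from fractions import Fraction

def harmonic_number(n: int) -> Fraction:
    return sum(Fraction(1, k) for k in range(1, n + 1))

assert harmonic_number(3) == Fraction(11, 6)  # 1 + 1/2 + 1/3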
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : Dict ) -> str:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : int , lowercase : Tuple , lowercase : Optional[int] , lowercase : int=True ) -> Any:
model.train()
_a = model(lowercase )
_a = F.mse_loss(lowercase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowercase )
def _lowerCamelCase ( lowercase : int , lowercase : Tuple=False ) -> List[str]:
set_seed(42 )
_a = RegressionModel()
_a = deepcopy(lowercase )
_a = RegressionDataset(length=80 )
_a = DataLoader(lowercase , batch_size=16 )
model.to(accelerator.device )
if sched:
_a = AdamW(params=model.parameters() , lr=1E-3 )
_a = AdamW(params=ddp_model.parameters() , lr=1E-3 )
_a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 )
_a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 )
# Make a copy of `model`
if sched:
_a , _a , _a , _a = accelerator.prepare(lowercase , lowercase , lowercase , lowercase )
else:
_a , _a = accelerator.prepare(lowercase , lowercase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[int]:
# Test when on a single CPU or GPU that the context manager does nothing
_a , _a , _a = get_training_setup(lowercase )
# Use a single batch
_a , _a = next(iter(lowercase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
else:
# Sync grads
step_model(lowercase , lowercase , lowercase , lowercase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowercase , lowercase , lowercase , lowercase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_a = ddp_input[torch.randperm(len(lowercase ) )]
def _lowerCamelCase ( lowercase : Tuple ) -> Tuple:
# Test on distributed setup that context manager behaves properly
_a , _a , _a = get_training_setup(lowercase )
# Use a single batch
_a , _a = next(iter(lowercase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
else:
# Sync grads
step_model(lowercase , lowercase , lowercase , lowercase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_a = ddp_input[torch.randperm(len(lowercase ) )]
def _lowerCamelCase ( lowercase : List[Any]=False , lowercase : Optional[int]=False ) -> Any:
_a = Accelerator(
split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_a , _a , _a = get_training_setup(lowercase )
for iteration, batch in enumerate(lowercase ):
_a , _a = batch.values()
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_a = ddp_input[torch.randperm(len(lowercase ) )]
GradientState._reset_state()
def _lowerCamelCase ( lowercase : int=False , lowercase : int=False ) -> Dict:
_a = Accelerator(
split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_a , _a , _a , _a , _a , _a , _a = get_training_setup(lowercase , lowercase )
for iteration, batch in enumerate(lowercase ):
_a , _a = batch.values()
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowercase , lowercase , lowercase , lowercase , lowercase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
_a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase ))
if accelerator.num_processes > 1:
check_model_parameters(lowercase , lowercase , lowercase , lowercase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def test_dataloader_break():
_a = Accelerator()
_a = RegressionDataset(length=80 )
_a = DataLoader(lowercase , batch_size=16 )
_a = RegressionDataset(length=96 )
_a = DataLoader(lowercase , batch_size=16 )
_a , _a = accelerator.prepare(lowercase , lowercase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowercase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase )
if iteration < len(lowercase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowercase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase )
if batch_num < len(lowercase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def main():
_a = Accelerator()
_a = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(lowercase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(lowercase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(lowercase , lowercase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(lowercase , lowercase )
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
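In miniature, what the accumulation tests above assert: gradients add up in place across micro-steps, and under DDP the `no_sync`/`accumulate` context defers the all-reduce until the step that closes the window. Plain-PyTorch sketch (illustrative only):

import torch
import torch.nn as nn

model = nn.Linear(2, 1)
accumulation_steps = 2
for micro_step in range(accumulation_steps):
    loss = model(torch.randn(4, 2)).sum() / accumulation_steps
    loss.backward()  # .grad accumulates in place across micro-steps
# a single optimizer.step() / optimizer.zero_grad() per window would go here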
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
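Quick sanity sketch of the `attribute_map` aliasing declared above (my addition):

config = TrOCRConfig(d_model=512, decoder_layers=6, decoder_attention_heads=8)
assert config.hidden_size == 512
assert config.num_hidden_layers == 6
assert config.num_attention_heads == 8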
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)

    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device),
            decoder_input_ids=inputs["decoder_input_ids"].to(self.device),
            max_length=self.model.decoder.config.max_position_embeddings,
            early_stopping=True,
            pad_token_id=self.pre_processor.tokenizer.pad_token_id,
            eos_token_id=self.pre_processor.tokenizer.eos_token_id,
            use_cache=True,
            num_beams=1,
            bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]],
            return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
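A hedged usage sketch for the tool above (kept commented because it downloads the Donut checkpoint; the file name is hypothetical):

# from PIL import Image
#
# tool = DocumentQuestionAnsweringTool()
# answer = tool(document=Image.open("invoice.png"), question="What is the total?")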
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = "src/transformers/models/auto"

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
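In isolation, the identifier-based sort key works like this (standalone demo):

import re

_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')
blocks = ['    ("beit", "BeitConfig"),', '    ("albert", "AlbertConfig"),']
blocks.sort(key=lambda b: _re_identifier.search(b).groups()[0])
assert [_re_identifier.search(b).groups()[0] for b in blocks] == ["albert", "beit"]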
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
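Usage sketch (mine): switching BigBird to full attention is just a config flag, and note the unusual default separator id.

config = BigBirdConfig(attention_type="original_full")  # disables block-sparse attention
assert config.attention_type == "original_full"
assert config.sep_token_id == 66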
'''simple docstring'''
import contextlib
import os
import sqlite3
import pytest
from datasets import Dataset, Features, Value
from datasets.io.sql import SqlDatasetReader, SqlDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases, require_sqlalchemy
def _lowerCamelCase ( lowercase : Dict , lowercase : Optional[Any] ) -> str:
assert isinstance(lowercase , lowercase )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@require_sqlalchemy
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : List[Any] , lowercase : Optional[Any] , lowercase : Any ) -> Optional[Any]:
_a = tmp_path / "cache"
_a = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_a = SqlDatasetReader(
"dataset" , "sqlite:///" + sqlite_path , cache_dir=lowercase , keep_in_memory=lowercase ).read()
_check_sql_dataset(lowercase , lowercase )
@require_sqlalchemy
@pytest.mark.parametrize(
"features" , [
None,
{"col_1": "string", "col_2": "int64", "col_3": "float64"},
{"col_1": "string", "col_2": "string", "col_3": "string"},
{"col_1": "int32", "col_2": "int32", "col_3": "int32"},
{"col_1": "float32", "col_2": "float32", "col_3": "float32"},
] , )
def _lowerCamelCase ( lowercase : Any , lowercase : Optional[int] , lowercase : Optional[int] , lowercase : int ) -> Optional[int]:
_a = tmp_path / "cache"
_a = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
_a = features.copy() if features else default_expected_features
_a = (
Features({feature: Value(lowercase ) for feature, dtype in features.items()} ) if features is not None else None
)
_a = SqlDatasetReader("dataset" , "sqlite:///" + sqlite_path , features=lowercase , cache_dir=lowercase ).read()
_check_sql_dataset(lowercase , lowercase )
def _lowerCamelCase ( lowercase : List[str] ) -> Optional[Any]:
with contextlib.closing(sqlitea.connect(lowercase ) ) as con:
_a = con.cursor()
cur.execute("SELECT * FROM dataset" )
for row in cur:
yield row
@require_sqlalchemy
def test_dataset_to_sql(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=1).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_multiproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=2).write()
    original_sql = iter_sql_file(sqlite_path)
    expected_sql = iter_sql_file(output_sqlite_path)
    for row1, row2 in zip(original_sql, expected_sql):
        assert row1 == row2
@require_sqlalchemy
def test_dataset_to_sql_invalidproc(sqlite_path, tmp_path, set_sqlalchemy_silence_uber_warning):
    cache_dir = tmp_path / "cache"
    output_sqlite_path = os.path.join(cache_dir, "tmp.sql")
    dataset = SqlDatasetReader("dataset", "sqlite:///" + sqlite_path, cache_dir=cache_dir).read()
    with pytest.raises(ValueError):
        SqlDatasetWriter(dataset, "dataset", "sqlite:///" + output_sqlite_path, num_proc=0).write()
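# --- Added example (not in the original file) ---
# Hedged round-trip sketch of the reader/writer pair exercised by the tests
# above (requires sqlalchemy; the table name and column values are arbitrary):
def _sql_roundtrip_demo(tmp_sql_path):
    data = {"col_1": ["a", "b"], "col_2": [1, 2], "col_3": [1.0, 2.0]}
    original = Dataset.from_dict(data)
    SqlDatasetWriter(original, "dataset", "sqlite:///" + tmp_sql_path, num_proc=1).write()
    reloaded = SqlDatasetReader("dataset", "sqlite:///" + tmp_sql_path).read()
    assert reloaded.num_rows == original.num_rows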
| 63 |
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """simple docstring"""

    @register_to_config
    def __init__(self, *, clip_extra_context_tokens: int = 4, clip_embeddings_dim: int = 768, time_embed_dim: int, cross_attention_dim: int):
        super().__init__()
        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))
        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)
        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)
        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]
        batch_size = prompt_embeds.shape[0]
        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds
        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)
        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)
        return text_encoder_hidden_states, additive_clip_time_embeddings
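# --- Added example (not in the original file) ---
# Hedged smoke test for the projection module above; the dimensions are
# arbitrary illustrative choices, not values from any released checkpoint.
if __name__ == "__main__":
    proj = UnCLIPTextProjModel(
        clip_extra_context_tokens=4, clip_embeddings_dim=32, time_embed_dim=64, cross_attention_dim=16
    )
    hidden, time_emb = proj(
        image_embeddings=torch.randn(2, 32),
        prompt_embeds=torch.randn(2, 32),
        text_encoder_hidden_states=torch.randn(2, 10, 32),
        do_classifier_free_guidance=False,
    )
    # 4 extra context tokens are prepended to the 10 text tokens
    assert hidden.shape == (2, 14, 16) and time_emb.shape == (2, 64)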
| 63 | 1 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'
},
'merges_file': {
'facebook/blenderbot_small-90M': 'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'
},
'tokenizer_config_file': {
'facebook/blenderbot_small-90M': (
'https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'
)
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'facebook/blenderbot_small-90M': 512}
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, merges_file, bos_token="__start__", eos_token="__end__", unk_token="__unk__", pad_token="__null__", **kwargs):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")
        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue
            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)
            if not pairs:
                words.append(token)
                continue
            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break
                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]
            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
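# --- Added example (not in the original file) ---
# Worked illustration of the BPE helper above: for a word already split into
# symbols with the end-of-word marker, `get_pairs` returns the adjacent pairs
# that merge ranks are looked up against.
assert get_pairs(("c", "a", "t</w>")) == {("c", "a"), ("a", "t</w>")}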
| 63 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters(model):
    model_parameters = filter(lambda p: p.requires_grad, model.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback(output_dir, metric):
    if metric == "rouge2":
        exp = "{val_avg_rouge2:.4f}-{step_count}"
    elif metric == "bleu":
        exp = "{val_avg_bleu:.4f}-{step_count}"
    elif metric == "em":
        exp = "{val_avg_em:.4f}-{step_count}"
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            " function.")
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir, filename=exp, monitor=f'val_{metric}', mode="max", save_top_k=3, every_n_epochs=1)
    return checkpoint_callback
def get_early_stopping_callback(metric, patience):
    return EarlyStopping(
        monitor=f'val_{metric}', mode="min" if "loss" in metric else "max", patience=patience, verbose=True)
class Seq2SeqLoggingCallback(pl.Callback):
    """simple docstring"""

    def on_batch_end(self, trainer, pl_module):
        lrs = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)}
        pl_module.logger.log_metrics(lrs)

    @rank_zero_only
    def _write_logs(self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True):
        logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****')
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]})
        # Log results
        od = Path(pl_module.hparams.output_dir)
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
            results_file.parent.mkdir(exist_ok=True)
            generations_file.parent.mkdir(exist_ok=True)
        with open(results_file, "a+") as writer:
            for key in sorted(metrics):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val, torch.Tensor):
                    val = val.item()
                msg = f'{key}: {val:.6f}\n'
                writer.write(msg)
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"])
            generations_file.open("w+").write(content)

    @rank_zero_only
    def on_train_start(self, trainer, pl_module):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module)
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6})

    @rank_zero_only
    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        return self._write_logs(trainer, pl_module, "test")

    @rank_zero_only
    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        save_json(pl_module.metrics, pl_module.metrics_save_path)
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
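# --- Added example (not in the original file) ---
# Hedged wiring sketch: how the helpers above would be combined for a
# pytorch_lightning Trainer (the output directory is a placeholder).
def build_callbacks(output_dir="outputs", metric="rouge2", patience=3):
    return [
        Seq2SeqLoggingCallback(),
        get_checkpoint_callback(output_dir, metric),
        get_early_stopping_callback(metric, patience),
    ]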
| 63 | 1 |
'''simple docstring'''
def hubble_parameter(hubble_constant: float, radiation_density: float, matter_density: float, dark_energy: float, redshift: float) -> float:
    parameters = [redshift, radiation_density, matter_density, dark_energy]
    if any(p < 0 for p in parameters):
        raise ValueError("All input parameters must be positive")
    if any(p > 1 for p in parameters[1:4]):
        raise ValueError("Relative densities cannot be greater than one")
    else:
        curvature = 1 - (matter_density + radiation_density + dark_energy)
        e_2 = (
            radiation_density * (redshift + 1) ** 4
            + matter_density * (redshift + 1) ** 3
            + curvature * (redshift + 1) ** 2
            + dark_energy
        )
        hubble = hubble_constant * e_2 ** (1 / 2)
        return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
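# --- Added sanity check (not in the original file) ---
# At z = 0 every (z + 1) factor is 1 and the densities sum to 1 by construction
# (curvature absorbs the remainder), so E(0) = 1 and the function must return
# the Hubble constant unchanged.
if __name__ == "__main__":
    h0 = hubble_parameter(
        hubble_constant=68.3,
        radiation_density=1e-4,
        matter_density=0.3,
        dark_energy=0.7,
        redshift=0,
    )
    assert abs(h0 - 68.3) < 1e-9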
| 63 |
'''simple docstring'''
import math
class SelfOrganizingMap:
    """simple docstring"""

    def get_winner(self, weights: list[list[float]], sample: list[int]) -> int:
        d0 = 0.0
        d1 = 0.0
        for i in range(len(sample)):
            d0 += math.pow((sample[i] - weights[0][i]), 2)
            d1 += math.pow((sample[i] - weights[1][i]), 2)
        return 0 if d0 > d1 else 1

    def update(self, weights: list[list[int | float]], sample: list[int], j: int, alpha: float):
        for i in range(len(weights)):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights


def main() -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs):
        for j in range(len(training_samples)):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights, sample)
            # Update the winning vector
            weights = self_organizing_map.update(weights, sample, winner, alpha)
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights, sample)
    # results
    print(f'Clusters that the test sample belongs to : {winner}')
    print(f'Weights that have been trained : {weights}')
# running the main() function
if __name__ == "__main__":
main()
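# --- Added worked example (not in the original file) ---
# With weights [[1, 0], [0, 1]] and sample [1, 0] the squared distances are
# d0 = 0 and d1 = 2, so under the `0 if d0 > d1 else 1` rule above the
# reported winner is cluster 1.
assert SelfOrganizingMap().get_winner([[1, 0], [0, 1]], [1, 0]) == 1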
| 63 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester(unittest.TestCase):
    """simple docstring"""

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_attention_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_choices=4):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        config = RoFormerConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict
@require_flax
class FlaxRoFormerModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """simple docstring"""

    test_head_masking = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
    def setUp(self):
        self.model_tester = FlaxRoFormerModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("junnyu/roformer_chinese_small", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class FlaxRoFormerModelIntegrationTest(unittest.TestCase):
    """simple docstring"""

    @slow
    def test_inference_masked_lm(self):
        model = FlaxRoFormerForMaskedLM.from_pretrained("junnyu/roformer_chinese_base")
        input_ids = jnp.array([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]
        vocab_size = 50000
        expected_shape = (1, 6, vocab_size)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = jnp.array(
            [[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]])
        self.assertTrue(jnp.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
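# --- Added usage note (not in the original file) ---
# Hedged sketch: the tester above can also be driven outside unittest to
# inspect the dummy config/inputs it generates, e.g.:
#
#   tester = FlaxRoFormerModelTester(parent=None)
#   config, inputs_dict = tester.prepare_config_and_inputs_for_common()
#   print({k: v.shape for k, v in inputs_dict.items() if v is not None})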
| 63 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class OwlViTProcessor(ProcessorMixin):
    """simple docstring"""

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'OwlViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, query_images=None, images=None, padding="max_length", return_tensors="np", **kwargs):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none.")
        if text is not None:
            if isinstance(text, str) or (isinstance(text, List) and not isinstance(text[0], List)):
                encodings = [self.tokenizer(text, padding=padding, return_tensors=return_tensors, **kwargs)]
            elif isinstance(text, List) and isinstance(text[0], List):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t) for t in text])
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t))
                    encoding = self.tokenizer(t, padding=padding, return_tensors=return_tensors, **kwargs)
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings], axis=0)
            else:
                raise ValueError("Target return tensor type could not be returned")
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images, return_tensors=return_tensors, **kwargs).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def post_process(self, *args, **kwargs):
        return self.image_processor.post_process(*args, **kwargs)

    def post_process_object_detection(self, *args, **kwargs):
        return self.image_processor.post_process_object_detection(*args, **kwargs)

    def post_process_image_guided_detection(self, *args, **kwargs):
        return self.image_processor.post_process_image_guided_detection(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
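# --- Added usage note (not in the original file) ---
# Hedged sketch of a typical call (the checkpoint id is an assumption):
#
#   from PIL import Image
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   image = Image.new("RGB", (768, 768))
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="np")
#   print(inputs["input_ids"].shape, inputs["pixel_values"].shape)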
| 63 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
logger = logging.get_logger(__name__)

MARIAN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
    # See all Marian models at https://huggingface.co/models?filter=marian
}
class MarianConfig(PretrainedConfig):
    """simple docstring"""

    model_type = 'marian'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__(self, vocab_size=58101, decoder_vocab_size=None, max_position_embeddings=1024, encoder_layers=12, encoder_ffn_dim=4096, encoder_attention_heads=16, decoder_layers=12, decoder_ffn_dim=4096, decoder_attention_heads=16, encoder_layerdrop=0.0, decoder_layerdrop=0.0, use_cache=True, is_encoder_decoder=True, activation_function="gelu", d_model=1024, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, decoder_start_token_id=58100, scale_embedding=False, pad_token_id=58100, eos_token_id=0, forced_eos_token_id=0, share_encoder_decoder_embeddings=True, **kwargs):
        self.vocab_size = vocab_size
        self.decoder_vocab_size = decoder_vocab_size or vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.share_encoder_decoder_embeddings = share_encoder_decoder_embeddings
        super().__init__(
            pad_token_id=pad_token_id, eos_token_id=eos_token_id, is_encoder_decoder=is_encoder_decoder, decoder_start_token_id=decoder_start_token_id, forced_eos_token_id=forced_eos_token_id, **kwargs, )
class MarianOnnxConfig(OnnxSeq2SeqConfigWithPast):
    """simple docstring"""
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}
            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ])
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f'past_key_values.{i}.key'] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f'past_key_values.{i}.value'] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ])
        return common_inputs
    @property
    # Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f'present.{i}.key'] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f'present.{i}.value'] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs
def UpperCamelCase__ ( self : Tuple , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ):
_a = self._generate_dummy_inputs_for_encoder_and_decoder(
__a , __a , __a , __a , __a )
# Generate decoder inputs
_a = seq_length if not self.use_past else 1
_a = self._generate_dummy_inputs_for_encoder_and_decoder(
__a , __a , __a , __a , __a )
_a = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_a = dict(**__a , **__a )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_a , _a = common_inputs["input_ids"].shape
_a = common_inputs["decoder_input_ids"].shape[1]
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = decoder_seq_length + 3
_a = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_a = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__a , __a )] , dim=1 )
_a = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_a , _a = self.num_layers
_a = min(__a , __a )
_a = max(__a , __a ) - min_num_layers
_a = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__a ):
common_inputs["past_key_values"].append(
(
torch.zeros(__a ),
torch.zeros(__a ),
torch.zeros(__a ),
torch.zeros(__a ),
) )
# TODO: test this.
_a = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__a , __a ):
common_inputs["past_key_values"].append((torch.zeros(__a ), torch.zeros(__a )) )
return common_inputs
def UpperCamelCase__ ( self : Union[str, Any] , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ):
_a = self._generate_dummy_inputs_for_encoder_and_decoder(
__a , __a , __a , __a , __a )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
_a , _a = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
_a = seqlen + 2
_a , _a = self.num_layers
_a , _a = self.num_attention_heads
_a = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_a = common_inputs["attention_mask"].dtype
_a = torch.cat(
[common_inputs["attention_mask"], torch.ones(__a , __a , dtype=__a )] , dim=1 )
_a = [
(torch.zeros(__a ), torch.zeros(__a )) for _ in range(__a )
]
return common_inputs
def UpperCamelCase__ ( self : str , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_a = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_a = tokenizer.num_special_tokens_to_add(__a )
_a = compute_effective_axis_dimension(
__a , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__a )
# Generate dummy inputs according to compute batch and sequence
_a = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
_a = dict(tokenizer(__a , return_tensors=__a ) )
return common_inputs
def UpperCamelCase__ ( self : Optional[int] , __a : PreTrainedTokenizer , __a : int = -1 , __a : int = -1 , __a : bool = False , __a : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
_a = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a )
else:
_a = self._generate_dummy_inputs_for_causal_lm(
__a , batch_size=__a , seq_length=__a , is_pair=__a , framework=__a )
return common_inputs
def UpperCamelCase__ ( self : Dict , __a : Dict , __a : Optional[int] , __a : str , __a : Tuple ):
if self.task in ["default", "seq2seq-lm"]:
_a = super()._flatten_past_key_values_(__a , __a , __a , __a )
else:
_a = super(__a , self )._flatten_past_key_values_(
__a , __a , __a , __a )
    @property
    def atol_for_validation(self) -> float:
        return 1e-4
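# --- Added usage note (not in the original file) ---
# Hedged sketch: pairing the config with its ONNX description (defaults only):
#
#   config = MarianConfig()
#   onnx_config = MarianOnnxConfig(config, task="seq2seq-lm")
#   print(list(onnx_config.inputs.keys()))  # input_ids, attention_mask, decoder_*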
| 63 |
'''simple docstring'''
def harmonic_series(n_term: str) -> list:
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        series.append(f'1/{temp + 1}' if series else "1")
    return series
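# --- Added worked example (not in the original file) ---
# The helper above emits the first n partial-fraction terms as strings:
assert harmonic_series("4") == ["1", "1/2", "1/3", "1/4"]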
if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
| 63 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["MobileViTFeatureExtractor"]
UpperCAmelCase__ = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
        "MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MobileViTForImageClassification",
        "MobileViTForSemanticSegmentation",
        "MobileViTModel",
        "MobileViTPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
        "TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFMobileViTForImageClassification",
        "TFMobileViTForSemanticSegmentation",
        "TFMobileViTModel",
        "TFMobileViTPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
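# --- Added usage note (not in the original file) ---
# With the _LazyModule indirection above, importing the package is cheap; a
# submodule is only materialized when one of its attributes is first accessed:
#
#   from transformers.models.mobilevit import MobileViTConfig  # triggers the lazy load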
| 0 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU.")
    parser.add_argument(
        "--pretrained_model_config", type=str, default="roberta-base", help="The model config to use. Note that we don't copy the model's weights, only the config!", )
    parser.add_argument(
        "--tokenizer", type=str, default="unigram-tokenizer-wikitext", help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size.", )
    parser.add_argument(
        "--per_replica_batch_size", type=int, default=8, help="Batch size per TPU core.", )
    parser.add_argument(
        "--no_tpu", action="store_true", help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances.", )
    parser.add_argument(
        "--tpu_name", type=str, help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs.", default="local", )
    parser.add_argument(
        "--tpu_zone", type=str, help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes.", )
    parser.add_argument(
        "--gcp_project", type=str, help="Google cloud project name. Only used for non-Colab TPU nodes.")
    parser.add_argument(
        "--bfloat16", action="store_true", help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU.", )
    parser.add_argument(
        "--train_dataset", type=str, help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--shuffle_buffer_size", type=int, default=2**18, help="Size of the shuffle buffer (in samples)", )
    parser.add_argument(
        "--eval_dataset", type=str, help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket.", )
    parser.add_argument(
        "--num_epochs", type=int, default=1, help="Number of epochs to train for.", )
    parser.add_argument(
        "--learning_rate", type=float, default=1e-4, help="Learning rate to use for training.", )
    parser.add_argument(
        "--weight_decay_rate", type=float, default=1e-3, help="Weight decay rate to use for training.", )
    parser.add_argument(
        "--max_length", type=int, default=512, help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py", )
    parser.add_argument(
        "--mlm_probability", type=float, default=0.15, help="Fraction of tokens to mask during training.", )
    parser.add_argument("--output_dir", type=str, required=True, help="Path to save model checkpoints to.")
    parser.add_argument("--hub_model_id", type=str, help="Model ID to upload to on the Hugging Face Hub.")
    args = parser.parse_args()
    return args
def initialize_tpu(args):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name, zone=args.tpu_zone, project=args.gcp_project)
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local.")
    tf.config.experimental_connect_to_cluster(tpu)
    tf.tpu.experimental.initialize_tpu_system(tpu)
    return tpu
def count_samples(file_list):
    num_samples = 0
    for file in file_list:
        filename = file.split("/")[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord", filename).group(1)
        sample_count = int(sample_count)
        num_samples += sample_count
    return num_samples
def prepare_dataset(records, decode_fn, mask_fn, batch_size, shuffle, shuffle_buffer_size=None):
    num_samples = count_samples(records)
    dataset = tf.data.Dataset.from_tensor_slices(records)
    if shuffle:
        dataset = dataset.shuffle(len(dataset))
    dataset = tf.data.TFRecordDataset(dataset, num_parallel_reads=AUTO)
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples))
    dataset = dataset.map(decode_fn, num_parallel_calls=AUTO)
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size)
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.map(mask_fn, num_parallel_calls=AUTO)
    dataset = dataset.prefetch(AUTO)
    return dataset
def main(args):
    if not args.no_tpu:
        tpu = initialize_tpu(args)
        strategy = tf.distribute.TPUStrategy(tpu)
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0")
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
    config = AutoConfig.from_pretrained(args.pretrained_model_config)
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset, "*.tfrecord"))
    if not training_records:
        raise ValueError(f'No .tfrecord files found in {args.train_dataset}.')
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset, "*.tfrecord"))
    if not eval_records:
        raise ValueError(f'No .tfrecord files found in {args.eval_dataset}.')
    num_train_samples = count_samples(training_records)
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config)
        model(model.dummy_inputs)  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer, schedule = create_optimizer(
            num_train_steps=total_train_steps, num_warmup_steps=total_train_steps // 20, init_lr=args.learning_rate, weight_decay_rate=args.weight_decay_rate, )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer, metrics=["accuracy"])

    def decode_fn(example):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64, shape=(args.max_length,)),
        }
        return tf.io.parse_single_example(example, features)

    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer, mlm_probability=args.mlm_probability, mlm=True, return_tensors="tf")

    def mask_with_collator(batch):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"], tf.bool)
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"], batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"], vocab_size=len(tokenizer), mask_token_id=tokenizer.mask_token_id, special_tokens_mask=special_tokens_mask, )
        return batch

    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=True, shuffle_buffer_size=args.shuffle_buffer_size, )
    eval_dataset = prepare_dataset(
        eval_records, decode_fn=decode_fn, mask_fn=mask_with_collator, batch_size=batch_size, shuffle=False, )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir, hub_model_id=args.hub_model_id, tokenizer=tokenizer))
    model.fit(
        train_dataset, validation_data=eval_dataset, epochs=args.num_epochs, callbacks=callbacks, )
    model.save_pretrained(args.output_dir)
if __name__ == "__main__":
    args = parse_args()
main(args)
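# --- Added invocation sketch (not in the original file) ---
# The script filename and bucket paths below are placeholders; the flags are
# the ones registered in parse_args() above.
#
#   python train_mlm.py \
#       --pretrained_model_config roberta-base \
#       --tokenizer unigram-tokenizer-wikitext \
#       --train_dataset gs://my-bucket/train/ \
#       --eval_dataset gs://my-bucket/validation/ \
#       --output_dir gs://my-bucket/model/ \
#       --bfloat16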
| 63 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def get_config(checkpoint_url):
    '''simple docstring'''
    config = SwinaSRConfig()
    if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
        config.upscale = 4
    elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
        config.upscale = 4
        config.image_size = 48
        config.upsampler = "pixelshuffle_aux"
    elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
        config.depths = [6, 6, 6, 6]
        config.embed_dim = 60
        config.num_heads = [6, 6, 6, 6]
        config.upsampler = "pixelshuffledirect"
    elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
        config.upscale = 4
        config.upsampler = "nearest+conv"
    elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
        config.num_channels = 1
        config.upscale = 1
        config.image_size = 1_26
        config.window_size = 7
        config.img_range = 255.0
        config.upsampler = ""
    return config
def lowerCAmelCase_ ( snake_case_ : List[Any] , snake_case_ : Optional[Any] ) -> List[str]:
'''simple docstring'''
if "patch_embed.proj" in name and "layers" not in name:
UpperCAmelCase_ = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
UpperCAmelCase_ = name.replace("patch_embed.norm" , "embeddings.patch_embeddings.layernorm" )
if "layers" in name:
UpperCAmelCase_ = name.replace("layers" , "encoder.stages" )
if "residual_group.blocks" in name:
UpperCAmelCase_ = name.replace("residual_group.blocks" , "layers" )
if "attn.proj" in name:
UpperCAmelCase_ = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
UpperCAmelCase_ = name.replace("attn" , "attention.self" )
if "norm1" in name:
UpperCAmelCase_ = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
UpperCAmelCase_ = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
UpperCAmelCase_ = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
UpperCAmelCase_ = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
UpperCAmelCase_ = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
UpperCAmelCase_ = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
UpperCAmelCase_ = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
UpperCAmelCase_ = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if "patch_embed.proj" in name:
UpperCAmelCase_ = name.replace("patch_embed.proj" , "patch_embed.projection" )
if name == "norm.weight":
UpperCAmelCase_ = "layernorm.weight"
if name == "norm.bias":
UpperCAmelCase_ = "layernorm.bias"
if "conv_first" in name:
UpperCAmelCase_ = name.replace("conv_first" , "first_convolution" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
UpperCAmelCase_ = name.replace("conv_last" , "final_convolution" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
UpperCAmelCase_ = name.replace("conv_before_upsample.0" , "conv_before_upsample" )
if "upsample.0" in name:
UpperCAmelCase_ = name.replace("upsample.0" , "upsample.convolution_0" )
if "upsample.2" in name:
UpperCAmelCase_ = name.replace("upsample.2" , "upsample.convolution_1" )
UpperCAmelCase_ = "upsample." + name
elif config.upsampler == "pixelshuffledirect":
UpperCAmelCase_ = name.replace("upsample.0.weight" , "upsample.conv.weight" )
UpperCAmelCase_ = name.replace("upsample.0.bias" , "upsample.conv.bias" )
else:
pass
else:
UpperCAmelCase_ = "swin2sr." + name
return name
def lowerCAmelCase_ ( snake_case_ : int , snake_case_ : Optional[Any] ) -> Any:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
UpperCAmelCase_ = orig_state_dict.pop(snake_case_ )
if "qkv" in key:
UpperCAmelCase_ = key.split("." )
UpperCAmelCase_ = int(key_split[1] )
UpperCAmelCase_ = int(key_split[4] )
UpperCAmelCase_ = config.embed_dim
if "weight" in key:
UpperCAmelCase_ = val[:dim, :]
UpperCAmelCase_ = val[dim : dim * 2, :]
UpperCAmelCase_ = val[-dim:, :]
else:
UpperCAmelCase_ = val[:dim]
UpperCAmelCase_ = val[dim : dim * 2]
UpperCAmelCase_ = val[-dim:]
pass
else:
UpperCAmelCase_ = val
return orig_state_dict
def convert_swinasr_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    '''simple docstring'''
    config = get_config(checkpoint_url)
    model = SwinaSRForImageSuperResolution(config)
    model.eval()
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
    # NOTE: the helper name below follows the (obfuscated) definition above,
    # which this reconstruction assumes was named `convert_state_dict`.
    new_state_dict = convert_state_dict(state_dict, config)
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict, strict=False)
    if len(missing_keys) > 0:
        raise ValueError("Missing keys when converting: {}".format(missing_keys))
    for key in unexpected_keys:
        if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
            raise ValueError(f"""Unexpected key {key} in state_dict""")
# verify values
UpperCAmelCase_ = "https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"
UpperCAmelCase_ = Image.open(requests.get(snake_case_ , stream=snake_case_ ).raw ).convert("RGB" )
UpperCAmelCase_ = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
UpperCAmelCase_ = 1_26 if "Jpeg" in checkpoint_url else 2_56
UpperCAmelCase_ = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
UpperCAmelCase_ = transforms(snake_case_ ).unsqueeze(0 )
if config.num_channels == 1:
UpperCAmelCase_ = pixel_values[:, 0, :, :].unsqueeze(1 )
UpperCAmelCase_ = model(snake_case_ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
UpperCAmelCase_ = torch.Size([1, 3, 5_12, 5_12] )
UpperCAmelCase_ = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
UpperCAmelCase_ = torch.Size([1, 3, 10_24, 10_24] )
UpperCAmelCase_ = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
UpperCAmelCase_ = torch.Size([1, 3, 10_24, 10_24] )
UpperCAmelCase_ = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
UpperCAmelCase_ = torch.Size([1, 3, 5_12, 5_12] )
UpperCAmelCase_ = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
UpperCAmelCase_ = torch.Size([1, 3, 10_24, 10_24] )
UpperCAmelCase_ = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f"""Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}"""
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , snake_case_ , atol=1E-3 )
print("Looks ok!" )
UpperCAmelCase_ = {
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth": (
"swin2SR-classical-sr-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth": (
"swin2SR-classical-sr-x4-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth": (
"swin2SR-compressed-sr-x4-48"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth": (
"swin2SR-lightweight-x2-64"
),
"https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth": (
"swin2SR-realworld-sr-x4-64-bsrgan-psnr"
),
}
UpperCAmelCase_ = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case_ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(snake_case_ )
if push_to_hub:
model.push_to_hub(f"""caidas/{model_name}""" )
processor.push_to_hub(f"""caidas/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
    args = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
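# --- Added invocation sketch (not in the original file) ---
# The script filename and output path are placeholders; the flags are the
# ones registered above.
#
#   python convert_swin2sr.py \
#       --checkpoint_url https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth \
#       --pytorch_dump_folder_path ./swin2sr-converted \
#       --push_to_hub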
| 1 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    """simple docstring"""

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.", FutureWarning, )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None, text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None, boxes: Union[List[List[int]], List[List[List[int]]]] = None, word_labels: Optional[Union[List[int], List[List[int]]]] = None, add_special_tokens: bool = True, padding: Union[bool, str, PaddingStrategy] = False, truncation: Union[bool, str, TruncationStrategy] = None, max_length: Optional[int] = None, stride: int = 0, pad_to_multiple_of: Optional[int] = None, return_token_type_ids: Optional[bool] = None, return_attention_mask: Optional[bool] = None, return_overflowing_tokens: bool = False, return_special_tokens_mask: bool = False, return_offsets_mapping: bool = False, return_length: bool = False, verbose: bool = True, return_tensors: Optional[Union[str, TensorType]] = None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.")
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True.")
        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs, )
        # add pixel values
        images = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])
        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f' {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}')
        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.", FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.", FutureWarning, )
        return self.image_processor
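# --- Added usage note (not in the original file) ---
# Hedged sketch of a typical call (the checkpoint id is an assumption); OCR
# runs inside the image processor when apply_ocr is enabled:
#
#   from PIL import Image
#   processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base")
#   image = Image.new("RGB", (224, 224))
#   encoding = processor(image, return_tensors="pt")
#   print(list(encoding.keys()))  # input_ids, attention_mask, bbox, pixel_values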
| 63 | 0 |
'''simple docstring'''
import argparse
import logging
import os
import datasets
import tensorflow as tf
from transformers import AutoTokenizer
lowerCamelCase : int = logging.getLogger(__name__)
def parse_args():
    """simple docstring"""
    parser = argparse.ArgumentParser(
        description='''Prepare TFRecord shards from pre-tokenized samples of the wikitext dataset.''' )
    parser.add_argument(
        '''--dataset_name''' , type=str , default='''wikitext''' , help='''Name of the training. Explore datasets at: hf.co/datasets.''' , )
    parser.add_argument(
        '''--dataset_config''' , type=str , default='''wikitext-103-raw-v1''' , help='''Configuration name of the dataset.''' )
    parser.add_argument(
        '''--tokenizer_name_or_path''' , type=str , default='''sayakpaul/unigram-tokenizer-wikitext''' , help='''Tokenizer identifier. Can be a local filepath or a Hub identifier.''' , )
    parser.add_argument(
        '''--shard_size''' , type=int , default=1_000 , help='''Number of entries to go in a single shard.''' , )
    parser.add_argument('''--split''' , type=str , default='''train''' , choices=['''train''', '''test''', '''validation'''] )
    parser.add_argument(
        '''--limit''' , default=None , type=int , help='''Limit the number of shards (used for debugging).''' , )
    parser.add_argument(
        '''--max_length''' , type=int , default=512 , help='''Maximum sequence length. For training on TPUs, it helps to have a maximum'''
        ''' sequence length that is a multiple of 8.''' , )
    parser.add_argument(
        '''--output_dir''' , default='''tf-tpu''' , type=str , help='''Output directory where the TFRecord shards will be saved. If the'''
        ''' path is appended with `gs://` (\'gs://tf-tpu\', for example) then the TFRecord'''
        ''' shards will be directly saved to a Google Cloud Storage bucket.''' , )
    args = parser.parse_args()
    return args
def tokenize_function(tokenizer):
    """Return a closure that tokenizes the `text` column with the given tokenizer."""
    def fn(examples):
        return tokenizer(examples['''text'''] )
    return fn
def get_serialized_examples(tokenized_data):
    """Serialize each tokenized sample into a `tf.train.Example` byte string."""
    records = []
    for i in range(len(tokenized_data['''input_ids'''] ) ):
        features = {
            '''input_ids''': tf.train.Feature(int64_list=tf.train.Int64List(value=tokenized_data['''input_ids'''][i] ) ),
            '''attention_mask''': tf.train.Feature(
                int64_list=tf.train.Int64List(value=tokenized_data['''attention_mask'''][i] ) ),
        }
        features = tf.train.Features(feature=features)
        example = tf.train.Example(features=features)
        record_bytes = example.SerializeToString()
        records.append(record_bytes)
    return records
def main(args):
    dataset = datasets.load_dataset(args.dataset_name , args.dataset_config , split=args.split )
    if args.limit is not None:
        max_samples = min(len(dataset ) , args.limit )
        dataset = dataset.select(range(max_samples ) )
        print(f"Limiting the dataset to {args.limit} entries." )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name_or_path )
    # Handle output directory creation.
    # For serializing into a Google Cloud Storage Bucket, one needs to first
    # create a bucket.
    if "gs" not in args.output_dir:
        if not os.path.exists(args.output_dir ):
            os.makedirs(args.output_dir )
        split_dir = os.path.join(args.output_dir , args.split )
        if not os.path.exists(split_dir ):
            os.makedirs(split_dir )
    else:
        split_dir = os.path.join(args.output_dir , args.split )
    # Tokenize the whole dataset at once.
    tokenize_fn = tokenize_function(tokenizer )
    dataset_tokenized = dataset.map(tokenize_fn , batched=True , num_proc=4 , remove_columns=['''text'''] )
    # We need to concatenate all our texts together, and then split the result
    # into chunks of a fixed size, which we will call block_size. To do this, we
    # will use the map method again, with the option batched=True. When we use batched=True,
    # the function we pass to map() will be passed multiple inputs at once, allowing us
    # to group them into more or fewer examples than we had in the input.
    # This allows us to create our new fixed-length samples. The advantage of this
    # method is that we don't lose a whole lot of content from the dataset compared to the
    # case where we simply tokenize with a pre-defined max_length.
    def group_texts(examples):
        # Concatenate all texts.
        concatenated_examples = {k: sum(examples[k] , [] ) for k in examples.keys()}
        total_length = len(concatenated_examples[list(examples.keys() )[0]] )
        # We drop the small remainder, though you could add padding instead if the model supports it
        # In this, as in all things, we advise you to follow your heart 🫀
        total_length = (total_length // args.max_length) * args.max_length
        # Split by chunks of max_len.
        result = {
            k: [t[i : i + args.max_length] for i in range(0 , total_length , args.max_length )]
            for k, t in concatenated_examples.items()
        }
        return result
    grouped_dataset = dataset_tokenized.map(group_texts , batched=True , batch_size=1_000 , num_proc=4 )
    shard_count = 0
    total_records = 0
    for shard in range(0 , len(grouped_dataset ) , args.shard_size ):
        dataset_snapshot = grouped_dataset[shard : shard + args.shard_size]
        records_containing = len(dataset_snapshot['''input_ids'''] )
        filename = os.path.join(split_dir , f"dataset-{shard_count}-{records_containing}.tfrecord" )
        serialized_examples = get_serialized_examples(dataset_snapshot )
        with tf.io.TFRecordWriter(filename ) as out_file:
            for i in range(len(serialized_examples ) ):
                example = serialized_examples[i]
                out_file.write(example )
            print('''Wrote file {} containing {} records'''.format(filename , records_containing ) )
        shard_count += 1
        total_records += records_containing
    with open(f"split-{args.split}-records-count.txt" , '''w''' ) as f:
        print(f"Total {args.split} records: {total_records}" , file=f )
if __name__ == "__main__":
    args = parse_args()
main(args)
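For reference, a short sketch of reading one shard back with tf.data (hedged: the shard path is hypothetical and the 512 feature length assumes the default --max_length):

import tensorflow as tf

feature_description = {
    "input_ids": tf.io.FixedLenFeature([512], tf.int64),
    "attention_mask": tf.io.FixedLenFeature([512], tf.int64),
}

def parse_record(serialized_example):
    # inverse of get_serialized_examples above
    return tf.io.parse_single_example(serialized_example, feature_description)

ds = tf.data.TFRecordDataset(["tf-tpu/train/dataset-0-1000.tfrecord"])  # hypothetical path
for example in ds.map(parse_record).take(1):
    print(example["input_ids"].shape)  # (512,)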
| 2 |
'''simple docstring'''
from ....utils import logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
    def __init__(self , config , num_labels=None , modal_hidden_size=2_048 ):
        # inherit every field of the wrapped config, then add the modal-specific ones
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 63 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import MobileBertConfig, is_tf_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_PRETRAINING_MAPPING,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertModel,
)
@require_tf
class A ( __snake_case , __snake_case , unittest.TestCase ):
__magic_name__ = (
(
TFMobileBertModel,
TFMobileBertForMaskedLM,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertForMultipleChoice,
)
if is_tf_available()
else ()
)
__magic_name__ = (
{
'''feature-extraction''': TFMobileBertModel,
'''fill-mask''': TFMobileBertForMaskedLM,
'''question-answering''': TFMobileBertForQuestionAnswering,
'''text-classification''': TFMobileBertForSequenceClassification,
'''token-classification''': TFMobileBertForTokenClassification,
'''zero-shot''': TFMobileBertForSequenceClassification,
}
if is_tf_available()
else {}
)
__magic_name__ = False
__magic_name__ = False
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=False ) -> List[str]:
"""simple docstring"""
A : Tuple = super()._prepare_for_class(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , return_labels=SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(SCREAMING_SNAKE_CASE ):
                inputs_dict['''labels'''] = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
return inputs_dict
class A ( __snake_case ):
def __init__( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE=13 , SCREAMING_SNAKE_CASE=7 , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=True , SCREAMING_SNAKE_CASE=99 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=32 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=37 , SCREAMING_SNAKE_CASE="gelu" , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=0.1 , SCREAMING_SNAKE_CASE=512 , SCREAMING_SNAKE_CASE=16 , SCREAMING_SNAKE_CASE=2 , SCREAMING_SNAKE_CASE=0.02 , SCREAMING_SNAKE_CASE=3 , SCREAMING_SNAKE_CASE=4 , SCREAMING_SNAKE_CASE=None , ) -> str:
"""simple docstring"""
A : List[str] = parent
A : str = batch_size
A : Optional[Any] = seq_length
A : List[str] = is_training
A : List[Any] = use_input_mask
A : Optional[Any] = use_token_type_ids
A : Optional[Any] = use_labels
A : List[str] = vocab_size
A : Dict = hidden_size
A : Union[str, Any] = num_hidden_layers
A : Tuple = num_attention_heads
A : Dict = intermediate_size
A : Tuple = hidden_act
A : List[Any] = hidden_dropout_prob
A : Tuple = attention_probs_dropout_prob
A : int = max_position_embeddings
A : int = type_vocab_size
A : str = type_sequence_label_size
A : int = initializer_range
A : Optional[Any] = num_labels
A : Optional[int] = num_choices
A : Tuple = scope
A : Dict = embedding_size
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
A : Union[str, Any] = None
if self.use_input_mask:
A : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
A : Dict = None
if self.use_token_type_ids:
A : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
A : Tuple = None
A : str = None
A : Any = None
if self.use_labels:
A : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
A : Union[str, Any] = ids_tensor([self.batch_size] , self.num_choices )
A : Optional[Any] = MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , embedding_size=self.embedding_size , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : int = TFMobileBertModel(config=SCREAMING_SNAKE_CASE )
A : Union[str, Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : List[str] = model(SCREAMING_SNAKE_CASE )
A : str = [input_ids, input_mask]
A : List[str] = model(SCREAMING_SNAKE_CASE )
A : Dict = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : Optional[int] = TFMobileBertForMaskedLM(config=SCREAMING_SNAKE_CASE )
A : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : Dict = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[Any]:
"""simple docstring"""
A : Union[str, Any] = TFMobileBertForNextSentencePrediction(config=SCREAMING_SNAKE_CASE )
A : Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : Optional[Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : str = TFMobileBertForPreTraining(config=SCREAMING_SNAKE_CASE )
A : int = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : str = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(
result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Optional[int]:
"""simple docstring"""
A : str = self.num_labels
A : Tuple = TFMobileBertForSequenceClassification(config=SCREAMING_SNAKE_CASE )
A : Optional[Any] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : Dict = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
A : Dict = self.num_choices
A : Dict = TFMobileBertForMultipleChoice(config=SCREAMING_SNAKE_CASE )
A : Tuple = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A : int = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A : Union[str, Any] = tf.tile(tf.expand_dims(SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
A : Tuple = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
A : Optional[Any] = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> Tuple:
"""simple docstring"""
A : Dict = self.num_labels
A : Optional[Any] = TFMobileBertForTokenClassification(config=SCREAMING_SNAKE_CASE )
A : Any = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : str = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) -> List[str]:
"""simple docstring"""
A : Union[str, Any] = TFMobileBertForQuestionAnswering(config=SCREAMING_SNAKE_CASE )
A : Dict = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
A : Any = model(SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
def __lowerCAmelCase ( self ) -> Tuple:
"""simple docstring"""
A : Optional[int] = TFMobileBertModelTest.TFMobileBertModelTester(self )
A : Union[str, Any] = ConfigTester(self , config_class=SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> str:
"""simple docstring"""
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Union[str, Any]:
"""simple docstring"""
A : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> Optional[Any]:
"""simple docstring"""
A : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ( self ) -> List[str]:
"""simple docstring"""
A : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*SCREAMING_SNAKE_CASE )
@slow
def __lowerCAmelCase ( self ) -> Any:
"""simple docstring"""
for model_name in ["google/mobilebert-uncased"]:
A : Optional[int] = TFMobileBertModel.from_pretrained(SCREAMING_SNAKE_CASE )
self.assertIsNotNone(SCREAMING_SNAKE_CASE )
@require_tf
class A ( unittest.TestCase ):
@slow
def __lowerCAmelCase ( self ) -> Dict:
"""simple docstring"""
A : Optional[Any] = TFMobileBertForPreTraining.from_pretrained('''google/mobilebert-uncased''' )
A : Tuple = tf.constant([[0, 1, 2, 3, 4, 5]] )
A : str = model(SCREAMING_SNAKE_CASE )[0]
A : Dict = [1, 6, 30522]
self.assertEqual(output.shape , SCREAMING_SNAKE_CASE )
A : Union[str, Any] = tf.constant(
[
[
[-4.5_919_547, -9.248_295, -9.645_256],
[-6.7_306_175, -6.440_284, -6.6_052_837],
[-7.2_743_506, -6.7_847_915, -6.024_673],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , SCREAMING_SNAKE_CASE , atol=1e-4 )
| 3 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    # Project Euler 6: difference between the square of the sum and the sum
    # of the squares of the first `n` natural numbers.
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
if __name__ == "__main__":
print(f"""{solution() = }""")
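The loop can also be replaced by the closed forms sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6; a sketch of that O(1) variant:

def solution_closed_form(n: int = 100) -> int:
    # same answer as solution(), without the loop
    sum_of_ints = n * (n + 1) // 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return sum_of_ints**2 - sum_of_squares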
| 63 | 0 |
'''simple docstring'''
import numpy as np
def exponential_linear_unit(vector: np.ndarray , alpha: float) -> np.ndarray:
    # ELU: identity for positive inputs, alpha * (exp(x) - 1) for negative ones
    return np.where(vector > 0 , vector , (alpha * (np.exp(vector ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
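A quick numeric check of the activation above (outputs rounded):

print(exponential_linear_unit(np.array([2.3, 0.6, -2.0, -3.8]), alpha=0.3))
# approximately [ 2.3  0.6  -0.2594  -0.2933 ]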
| 4 |
'''simple docstring'''
def is_palindrome_number(num: int) -> bool:
    # A negative number can never read the same in reverse because of the sign.
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
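A few sanity checks for the digit-reversal test above:

print(is_palindrome_number(121))   # True
print(is_palindrome_number(-121))  # False: the sign breaks the symmetry
print(is_palindrome_number(10))    # False: reversal gives 1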
| 63 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
        self.special_tokens_map = {'''unk_token''': '''<unk>'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )

    def get_tokenizer(self , **kwargs):
        kwargs.update(self.special_tokens_map )
        return CTRLTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts(self , tokenizer):
        input_text = '''adapt react readapt apt'''
        output_text = '''adapt react readapt apt'''
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = '''adapt react readapt apt'''
        bpe_tokens = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
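A hedged round-trip sketch with the published checkpoint (network access and the "ctrl" Hub identifier are assumptions; the toy vocabulary above stays fully offline):

from transformers import CTRLTokenizer

tok = CTRLTokenizer.from_pretrained("ctrl")
ids = tok("adapt react readapt apt")["input_ids"]
print(tok.convert_ids_to_tokens(ids))  # pieces ending in '@@' continue into the next token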
| 5 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox'] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
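The pattern above keeps `import` cheap: submodules are only resolved when a name is first accessed. A simplified sketch of the idea (not transformers' actual _LazyModule, which also handles __dir__, pickling, and module specs):

import importlib
import types

class SimpleLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map every exported name to the submodule that defines it
        self._class_to_module = {
            cls: submodule for submodule, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, item):
        if item not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {item!r}")
        submodule = importlib.import_module("." + self._class_to_module[item], self.__name__)
        return getattr(submodule, item)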
| 63 | 0 |
import enum
import shutil
import sys
TERMINAL_WIDTH , _ = shutil.get_terminal_size()
CURSOR_TO_CHAR = {'UP': 'A', 'DOWN': 'B', 'RIGHT': 'C', 'LEFT': 'D'}


class Direction(enum.Enum):
    UP = 0
    DOWN = 1


def forceWrite(content , end="" ) -> None:
    # write without buffering so cursor tricks take effect immediately
    sys.stdout.write(str(content ) + end )
    sys.stdout.flush()


def writeColor(content , color , end="" ) -> None:
    forceWrite(F"""\u001b[{color}m{content}\u001b[0m""" , end )


def reset_cursor() -> None:
    forceWrite('''\r''' )


def move_cursor(num_lines , direction ) -> None:
    forceWrite(F"""\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}""" )


def clear_line() -> None:
    forceWrite(''' ''' * TERMINAL_WIDTH )
    reset_cursor()


def linebreak() -> None:
    reset_cursor()
    forceWrite('''-''' * TERMINAL_WIDTH ) | 6 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase_ : Any = get_tests_dir('fixtures')
lowerCAmelCase_ : Union[str, Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCAmelCase_ : Dict = get_tests_dir('fixtures/dummy-config.json')
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] ):
_a = 0
def UpperCamelCase__ ( self : str ):
_a = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Tuple ):
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
_a = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_a = AutoFeatureExtractor.from_pretrained(__a ).to_dict()
config_dict.pop("feature_extractor_type" )
_a = WavaVecaFeatureExtractor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
_a = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Tuple ):
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
__a , "bert-base is not a local folder and is not a valid model identifier" ):
_a = AutoFeatureExtractor.from_pretrained("bert-base" )
def UpperCamelCase__ ( self : Optional[Any] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_a = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" )
def UpperCamelCase__ ( self : List[Any] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
_a = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def UpperCamelCase__ ( self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def UpperCamelCase__ ( self : Any ):
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoFeatureExtractor.register(__a , __a )
# Now that the config is registered, it can be used as any other config with the auto-API
_a = CustomFeatureExtractor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self : Tuple ):
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =True
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# If remote code is not set, the default is to use local
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
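Outside the test harness, the registration API exercised above looks roughly like this (a sketch; MyConfig and MyFeatureExtractor are hypothetical names):

from transformers import AutoConfig, AutoFeatureExtractor, PretrainedConfig
from transformers.feature_extraction_utils import FeatureExtractionMixin

class MyConfig(PretrainedConfig):
    model_type = "my-model"

class MyFeatureExtractor(FeatureExtractionMixin):
    pass

# after registration, the Auto classes can resolve "my-model" checkpoints
AutoConfig.register("my-model", MyConfig)
AutoFeatureExtractor.register(MyConfig, MyFeatureExtractor)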
| 63 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ["DeiTFeatureExtractor"]
lowercase_ = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
"DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DeiTForImageClassification",
"DeiTForImageClassificationWithTeacher",
"DeiTForMaskedImageModeling",
"DeiTModel",
"DeiTPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
"TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDeiTForImageClassification",
"TFDeiTForImageClassificationWithTeacher",
"TFDeiTForMaskedImageModeling",
"TFDeiTModel",
"TFDeiTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 7 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig):
    """Configuration class for GPTBigCode models."""

    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }

    def __init__(self , vocab_size=50_257 , n_positions=1_024 , n_embd=768 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=50_256 , eos_token_id=50_256 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
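A small instantiation sketch (hedged: the sizes are illustrative, not those of any released checkpoint):

config = GPTBigCodeConfig(
    vocab_size=50_257,
    n_positions=1_024,
    n_embd=768,
    n_layer=12,
    n_head=12,
    multi_query=True,  # one shared key/value head, the signature feature of this architecture
)
print(config.hidden_size)  # 768, resolved through attribute_map -> n_embd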
| 63 | 0 |
from binascii import hexlify
from hashlib import sha256
from os import urandom
# RFC 3526 - More Modular Exponential (MODP) Diffie-Hellman groups for
# Internet Key Exchange (IKE) https://tools.ietf.org/html/rfc3526
primes = {
# 1536-bit
5: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA237327FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 2048-bit
14: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AACAA68FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 3072-bit
15: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 4096-bit
16: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934063199'''
+ '''FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 6144-bit
17: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E08'''
+ '''8A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B'''
+ '''302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9'''
+ '''A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE6'''
+ '''49286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8'''
+ '''FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C'''
+ '''180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF695581718'''
+ '''3995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D'''
+ '''04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7D'''
+ '''B3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D226'''
+ '''1AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFC'''
+ '''E0FD108E4B82D120A92108011A723C12A787E6D788719A10BDBA5B26'''
+ '''99C327186AF4E23C1A946834B6150BDA2583E9CA2AD44CE8DBBBC2DB'''
+ '''04DE8EF92E8EFC141FBECAA6287C59474E6BC05D99B2964FA090C3A2'''
+ '''233BA186515BE7ED1F612970CEE2D7AFB81BDD762170481CD0069127'''
+ '''D5B05AA993B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BDF8FF9406'''
+ '''AD9E530EE5DB382F413001AEB06A53ED9027D831179727B0865A8918'''
+ '''DA3EDBEBCF9B14ED44CE6CBACED4BB1BDB7F1447E6CC254B33205151'''
+ '''2BD7AF426FB8F401378CD2BF5983CA01C64B92ECF032EA15D1721D03'''
+ '''F482D7CE6E74FEF6D55E702F46980C82B5A84031900B1C9E59E7C97F'''
+ '''BEC7E8F323A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE32806A1D58B'''
+ '''B7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55CDA56C9EC2EF29632'''
+ '''387FE8D76E3C0468043E8F663F4860EE12BF2D5B0B7474D6E694F91E'''
+ '''6DCC4024FFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
# 8192-bit
18: {
'''prime''': int(
'''FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD1'''
+ '''29024E088A67CC74020BBEA63B139B22514A08798E3404DD'''
+ '''EF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245'''
+ '''E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED'''
+ '''EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3D'''
+ '''C2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F'''
+ '''83655D23DCA3AD961C62F356208552BB9ED529077096966D'''
+ '''670C354E4ABC9804F1746C08CA18217C32905E462E36CE3B'''
+ '''E39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9'''
+ '''DE2BCBF6955817183995497CEA956AE515D2261898FA0510'''
+ '''15728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64'''
+ '''ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7'''
+ '''ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6B'''
+ '''F12FFA06D98A0864D87602733EC86A64521F2B18177B200C'''
+ '''BBE117577A615D6C770988C0BAD946E208E24FA074E5AB31'''
+ '''43DB5BFCE0FD108E4B82D120A92108011A723C12A787E6D7'''
+ '''88719A10BDBA5B2699C327186AF4E23C1A946834B6150BDA'''
+ '''2583E9CA2AD44CE8DBBBC2DB04DE8EF92E8EFC141FBECAA6'''
+ '''287C59474E6BC05D99B2964FA090C3A2233BA186515BE7ED'''
+ '''1F612970CEE2D7AFB81BDD762170481CD0069127D5B05AA9'''
+ '''93B4EA988D8FDDC186FFB7DC90A6C08F4DF435C934028492'''
+ '''36C3FAB4D27C7026C1D4DCB2602646DEC9751E763DBA37BD'''
+ '''F8FF9406AD9E530EE5DB382F413001AEB06A53ED9027D831'''
+ '''179727B0865A8918DA3EDBEBCF9B14ED44CE6CBACED4BB1B'''
+ '''DB7F1447E6CC254B332051512BD7AF426FB8F401378CD2BF'''
+ '''5983CA01C64B92ECF032EA15D1721D03F482D7CE6E74FEF6'''
+ '''D55E702F46980C82B5A84031900B1C9E59E7C97FBEC7E8F3'''
+ '''23A97A7E36CC88BE0F1D45B7FF585AC54BD407B22B4154AA'''
+ '''CC8F6D7EBF48E1D814CC5ED20F8037E0A79715EEF29BE328'''
+ '''06A1D58BB7C5DA76F550AA3D8A1FBFF0EB19CCB1A313D55C'''
+ '''DA56C9EC2EF29632387FE8D76E3C0468043E8F663F4860EE'''
+ '''12BF2D5B0B7474D6E694F91E6DBE115974A3926F12FEE5E4'''
+ '''38777CB6A932DF8CD8BEC4D073B931BA3BC832B68D9DD300'''
+ '''741FA7BF8AFC47ED2576F6936BA424663AAB639C5AE4F568'''
+ '''3423B4742BF1C978238F16CBE39D652DE3FDB8BEFC848AD9'''
+ '''22222E04A4037C0713EB57A81A23F0C73473FC646CEA306B'''
+ '''4BCBC8862F8385DDFA9D4B7FA2C087E879683303ED5BDD3A'''
+ '''062B3CF5B3A278A66D2A13F83F44F82DDF310EE074AB6A36'''
+ '''4597E899A0255DC164F31CC50846851DF9AB48195DED7EA1'''
+ '''B1D510BD7EE74D73FAF36BC31ECFA268359046F4EB879F92'''
+ '''4009438B481C6CD7889A002ED5EE382BC9190DA6FC026E47'''
+ '''9558E4475677E9AA9E3050E2765694DFC81F56E880B96E71'''
+ '''60C980DD98EDD3DFFFFFFFFFFFFFFFFF''',
base=16,
),
'''generator''': 2,
},
}
class DiffieHellman:
    '''simple docstring'''

    def __init__(self , group: int = 14 ) -> None:
        if group not in primes:
            raise ValueError('''Unsupported Group''' )
        self.prime = primes[group]['''prime''']
        self.generator = primes[group]['''generator''']
        # 32 random bytes -> a 256-bit private exponent
        self.__private_key = int(hexlify(urandom(32 ) ) , base=16 )

    def get_private_key(self ) -> str:
        return hex(self.__private_key )[2:]

    def generate_public_key(self ) -> str:
        public_key = pow(self.generator , self.__private_key , self.prime )
        return hex(public_key )[2:]

    def is_valid_public_key(self , key: int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= key <= self.prime - 2
            and pow(key , (self.prime - 1) // 2 , self.prime ) == 1
        )

    def generate_shared_key(self , other_key_str: str ) -> str:
        other_key = int(other_key_str , base=16 )
        if not self.is_valid_public_key(other_key ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(other_key , self.__private_key , self.prime )
        return sha256(str(shared_key ).encode() ).hexdigest()

    @staticmethod
    def is_valid_public_key_static(remote_public_key: int , prime: int ) -> bool:
        # check if the other public key is valid based on NIST SP800-56
        return (
            2 <= remote_public_key <= prime - 2
            and pow(remote_public_key , (prime - 1) // 2 , prime ) == 1
        )

    @staticmethod
    def generate_shared_key_static(local_private_key_str: str , remote_public_key_str: str , group: int = 14 ) -> str:
        local_private_key = int(local_private_key_str , base=16 )
        remote_public_key = int(remote_public_key_str , base=16 )
        prime = primes[group]['''prime''']
        if not DiffieHellman.is_valid_public_key_static(remote_public_key , prime ):
            raise ValueError('''Invalid public key''' )
        shared_key = pow(remote_public_key , local_private_key , prime )
        return sha256(str(shared_key ).encode() ).hexdigest()
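A usage sketch of the class above (method names follow the reconstruction here): two parties on the same MODP group derive an identical hashed secret.

def _example_key_exchange() -> None:
    alice = DiffieHellman(group=14)
    bob = DiffieHellman(group=14)
    alice_shared = alice.generate_shared_key(bob.generate_public_key())
    bob_shared = bob.generate_shared_key(alice.generate_public_key())
    assert alice_shared == bob_shared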
if __name__ == "__main__":
import doctest
doctest.testmod() | 8 |
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    # float cube root; see the exact integer variant below for large inputs
    val = n ** (1 / 3)
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
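The float cube root above can drift for large integers; an integer-only variant (a sketch, handling negatives via their absolute value) sidesteps the rounding:

def perfect_cube_exact(n: int) -> bool:
    # round the float estimate, then verify exactly with integer arithmetic
    if n < 0:
        n = -n
    root = round(n ** (1 / 3))
    return any((root + d) ** 3 == n for d in (-1, 0, 1))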
| 63 | 0 |
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    '''simple docstring'''

    def __init__(self , vertices: set[int] , edges: Mapping[EdgeT, int] ) -> None:
        self.vertices: set[int] = vertices
        # store each edge with its endpoints in sorted order so (u, v) == (v, u)
        self.edges: dict[EdgeT, int] = {
            (min(edge ), max(edge )): weight for edge, weight in edges.items()
        }

    def add_edge(self , edge: EdgeT , weight: int ) -> None:
        self.vertices.add(edge[0] )
        self.vertices.add(edge[1] )
        self.edges[(min(edge ), max(edge ))] = weight

    def prims_algorithm(self ) -> Graph:
        # grow a minimum spanning tree outward from an arbitrary start vertex
        subgraph: Graph = Graph({min(self.vertices )} , {} )
        while len(subgraph.vertices ) < len(self.vertices ):
            min_weight: int = max(self.edges.values() ) + 1
            for edge, weight in self.edges.items():
                # consider only edges crossing the cut between subgraph and the rest
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge: EdgeT = edge
                        min_weight = weight
            subgraph.add_edge(min_edge , min_weight )
        return subgraph


def solution(filename: str = "p107_network.txt" ) -> int:
    script_directory: str = os.path.abspath(os.path.dirname(__file__ ) )
    filepath: str = os.path.join(script_directory , filename )
    edges: dict[EdgeT, int] = {}
    with open(filepath ) as f:
        data = f.read().strip().split('''\n''' )
    adjacency_matrix = [line.split(''',''' ) for line in data]
    for edge1 in range(1 , len(adjacency_matrix ) ):
        for edge2 in range(edge1 ):
            if adjacency_matrix[edge1][edge2] != "-":
                edges[(edge2, edge1)] = int(adjacency_matrix[edge1][edge2] )
    graph: Graph = Graph(set(range(len(adjacency_matrix ) ) ) , edges )
    subgraph: Graph = graph.prims_algorithm()
    initial_total: int = sum(graph.edges.values() )
    optimal_total: int = sum(subgraph.edges.values() )
    return initial_total - optimal_total
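A small worked example of the machinery above: a weighted triangle with a pendant vertex, where Prim keeps the two light triangle edges and drops the heavy one.

def _example_mst() -> None:
    example = Graph({0, 1, 2, 3}, {(0, 1): 5, (1, 2): 3, (0, 2): 9, (2, 3): 7})
    mst = example.prims_algorithm()
    # total weight 24, MST weight 15 -> saving 9: the heavy edge (0, 2) is dropped
    assert sum(example.edges.values()) - sum(mst.edges.values()) == 9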
if __name__ == "__main__":
print(f"""{solution() = }""")
| 9 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[int] = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='deta'
__a ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : List[str] , __a : List[str]=None , __a : Dict=9_00 , __a : str=20_48 , __a : Tuple=6 , __a : List[str]=20_48 , __a : str=8 , __a : Union[str, Any]=6 , __a : int=10_24 , __a : List[Any]=8 , __a : Dict=0.0 , __a : Tuple=True , __a : Optional[Any]="relu" , __a : Tuple=2_56 , __a : Optional[Any]=0.1 , __a : int=0.0 , __a : List[Any]=0.0 , __a : Optional[int]=0.02 , __a : str=1.0 , __a : Dict=True , __a : Dict=False , __a : Optional[int]="sine" , __a : Any=5 , __a : List[str]=4 , __a : Optional[int]=4 , __a : List[str]=True , __a : str=3_00 , __a : int=True , __a : int=True , __a : Tuple=1 , __a : Optional[int]=5 , __a : Tuple=2 , __a : Dict=1 , __a : Optional[int]=1 , __a : Any=5 , __a : Optional[int]=2 , __a : Dict=0.1 , __a : str=0.25 , **__a : Tuple , ):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_a = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(__a , __a ):
_a = backbone_config.pop("model_type" )
_a = CONFIG_MAPPING[backbone_model_type]
_a = config_class.from_dict(__a )
_a = backbone_config
_a = num_queries
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = init_xavier_std
_a = encoder_layerdrop
_a = auxiliary_loss
_a = position_embedding_type
# deformable attributes
_a = num_feature_levels
_a = encoder_n_points
_a = decoder_n_points
_a = two_stage
_a = two_stage_num_proposals
_a = with_box_refine
_a = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
_a = class_cost
_a = bbox_cost
_a = giou_cost
# Loss coefficients
_a = mask_loss_coefficient
_a = dice_loss_coefficient
_a = bbox_loss_coefficient
_a = giou_loss_coefficient
_a = eos_coefficient
_a = focal_alpha
super().__init__(is_encoder_decoder=__a , **__a )
@property
def UpperCamelCase__ ( self : Optional[Any] ):
return self.encoder_attention_heads
@property
def UpperCamelCase__ ( self : Dict ):
return self.d_model
def UpperCamelCase__ ( self : List[str] ):
_a = copy.deepcopy(self.__dict__ )
_a = self.backbone_config.to_dict()
_a = self.__class__.model_type
return output
| 63 | 0 |
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
__A = "<<<<<<< This should probably be modified because it mentions: "
__A = "=======\n>>>>>>>\n"
__A = [
"TextEncoderConfig",
"ByteTextEncoder",
"SubwordTextEncoder",
"encoder_config",
"maybe_build_from_corpus",
"manual_dir",
]
__A = [
# (pattern, replacement)
# Order is important here for some replacements
(R"tfds\.core", R"datasets"),
(R"tf\.io\.gfile\.GFile", R"open"),
(R"tf\.([\w\d]+)", R"datasets.Value('\1')"),
(R"tfds\.features\.Text\(\)", R"datasets.Value('string')"),
(R"tfds\.features\.Text\(", R"datasets.Value('string'),"),
(R"features\s*=\s*tfds.features.FeaturesDict\(", R"features=datasets.Features("),
(R"tfds\.features\.FeaturesDict\(", R"dict("),
(R"The TensorFlow Datasets Authors", R"The TensorFlow Datasets Authors and the HuggingFace Datasets Authors"),
(R"tfds\.", R"datasets."),
(R"dl_manager\.manual_dir", R"self.config.data_dir"),
(R"self\.builder_config", R"self.config"),
]
def lowerCAmelCase_ ( __a ) -> Optional[Any]:
"""simple docstring"""
return ConvertCommand(args.tfds_path , args.datasets_directory )
class _SCREAMING_SNAKE_CASE ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
def SCREAMING_SNAKE_CASE_ (UpperCAmelCase_ : ArgumentParser) ->Dict:
'''simple docstring'''
lowerCamelCase__: List[str] =parser.add_parser(
"convert" , help="Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset." , )
train_parser.add_argument(
"--tfds_path" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to a TensorFlow Datasets folder to convert or a single tfds file to convert." , )
train_parser.add_argument(
"--datasets_directory" , type=UpperCAmelCase_ , required=UpperCAmelCase_ , help="Path to the HuggingFace Datasets folder.")
train_parser.set_defaults(func=UpperCAmelCase_)
def __init__(self : Any , UpperCAmelCase_ : str , UpperCAmelCase_ : str , *UpperCAmelCase_ : List[str]) ->int:
'''simple docstring'''
lowerCamelCase__: str =get_logger("datasets-cli/converting")
lowerCamelCase__: Tuple =tfds_path
lowerCamelCase__: Union[str, Any] =datasets_directory
def SCREAMING_SNAKE_CASE_ (self : List[str]) ->Union[str, Any]:
'''simple docstring'''
if os.path.isdir(self._tfds_path):
lowerCamelCase__: Dict =os.path.abspath(self._tfds_path)
elif os.path.isfile(self._tfds_path):
lowerCamelCase__: Any =os.path.dirname(self._tfds_path)
else:
raise ValueError("--tfds_path is neither a directory nor a file. Please check path.")
lowerCamelCase__: Any =os.path.abspath(self._datasets_directory)
self._logger.info(F"""Converting datasets from {abs_tfds_path} to {abs_datasets_path}""")
lowerCamelCase__: List[str] =[]
lowerCamelCase__: Optional[int] =[]
lowerCamelCase__: int ={}
if os.path.isdir(self._tfds_path):
lowerCamelCase__: Tuple =os.listdir(UpperCAmelCase_)
else:
lowerCamelCase__: int =[os.path.basename(self._tfds_path)]
for f_name in file_names:
self._logger.info(F"""Looking at file {f_name}""")
lowerCamelCase__: Tuple =os.path.join(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: List[str] =os.path.join(UpperCAmelCase_ , UpperCAmelCase_)
if not os.path.isfile(UpperCAmelCase_) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info("Skipping file")
continue
with open(UpperCAmelCase_ , encoding="utf-8") as f:
lowerCamelCase__: Union[str, Any] =f.readlines()
lowerCamelCase__: int =[]
lowerCamelCase__: Any =False
lowerCamelCase__: int =False
lowerCamelCase__: int =[]
for line in lines:
lowerCamelCase__: List[Any] =line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
lowerCamelCase__: List[Any] ="import datasets\n"
elif "import tensorflow" in out_line:
# order is important here
lowerCamelCase__: Union[str, Any] =""
continue
elif "from absl import logging" in out_line:
lowerCamelCase__: Tuple ="from datasets import logging\n"
elif "getLogger" in out_line:
lowerCamelCase__: List[str] =out_line.replace("getLogger" , "get_logger")
elif any(expression in out_line for expression in TO_HIGHLIGHT):
lowerCamelCase__: str =True
lowerCamelCase__: List[Any] =list(filter(lambda UpperCAmelCase_: e in out_line , UpperCAmelCase_))
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(UpperCAmelCase_) + "\n")
out_lines.append(UpperCAmelCase_)
out_lines.append(UpperCAmelCase_)
continue
else:
for pattern, replacement in TO_CONVERT:
lowerCamelCase__: Dict =re.sub(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_)
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
lowerCamelCase__: Any =re.match(R"from\stensorflow_datasets.*import\s([^\.\r\n]+)" , UpperCAmelCase_)
tfds_imports.extend(imp.strip() for imp in match.group(1).split(","))
lowerCamelCase__: Any ="from . import " + match.group(1)
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(F"""Error converting {out_line.strip()}""")
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
lowerCamelCase__: Optional[int] =True
out_lines.append(UpperCAmelCase_)
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
lowerCamelCase__: Tuple =f_name.replace(".py" , "")
lowerCamelCase__: Optional[int] =os.path.join(UpperCAmelCase_ , UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =os.path.join(UpperCAmelCase_ , UpperCAmelCase_)
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_)
self._logger.info(F"""Adding directory {output_dir}""")
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports})
else:
# Utilities will be moved at the end
utils_files.append(UpperCAmelCase_)
if needs_manual_update:
with_manual_update.append(UpperCAmelCase_)
with open(UpperCAmelCase_ , "w" , encoding="utf-8") as f:
f.writelines(UpperCAmelCase_)
self._logger.info(F"""Converted in {output_file}""")
for utils_file in utils_files:
try:
lowerCamelCase__: Union[str, Any] =os.path.basename(UpperCAmelCase_)
lowerCamelCase__: Optional[Any] =imports_to_builder_map[f_name.replace(".py" , "")]
self._logger.info(F"""Moving {dest_folder} to {utils_file}""")
shutil.copy(UpperCAmelCase_ , UpperCAmelCase_)
except KeyError:
self._logger.error(F"""Cannot find destination folder for {utils_file}. Please copy manually.""")
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
F"""You need to manually update file {file_path} to remove configurations using 'TextEncoderConfig'.""")
| 10 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def _lowerCamelCase ( lowercase : Union[str, Any] , lowercase : int , lowercase : int=1024 , lowercase : int=1024 , lowercase : Tuple=False , **lowercase : Optional[int] ) -> Union[str, Any]:
_a = AutoTokenizer.from_pretrained(lowercase )
_a = SeqaSeqDataset(lowercase , lowercase , lowercase , lowercase , type_path="train" , **lowercase )
_a = tok.pad_token_id
def get_lens(lowercase : Optional[int] ):
_a = tqdm(
DataLoader(lowercase , batch_size=512 , num_workers=8 , shuffle=lowercase , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
_a = []
for batch in dl:
_a = batch["input_ids"].ne(lowercase ).sum(1 ).tolist()
_a = batch["labels"].ne(lowercase ).sum(1 ).tolist()
if consider_target:
for src, tgt in zip(lowercase , lowercase ):
max_lens.append(max(lowercase , lowercase ) )
else:
max_lens.extend(lowercase )
return max_lens
_a = get_lens(lowercase )
_a = SeqaSeqDataset(lowercase , lowercase , lowercase , lowercase , type_path="val" , **lowercase )
_a = get_lens(lowercase )
pickle_save(lowercase , train_ds.len_file )
pickle_save(lowercase , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 63 | 0 |
import unittest

from transformers import (
    MODEL_FOR_OBJECT_DETECTION_MAPPING,
    AutoFeatureExtractor,
    AutoModelForObjectDetection,
    ObjectDetectionPipeline,
    is_vision_available,
    pipeline,
)
from transformers.testing_utils import (
    is_pipeline_test,
    nested_simplify,
    require_pytesseract,
    require_tf,
    require_timm,
    require_torch,
    require_vision,
    slow,
)

from .test_pipelines_common import ANY


if is_vision_available():
    from PIL import Image
else:

    class Image:
        """Dummy stand-in so the module can be imported without the vision extras."""

        @staticmethod
        def open(*args, **kwargs):
            pass


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]

    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )

    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass

    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ],
            threshold=0.0,
        )

        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
                [
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                    {"score": 0.3376, "label": "LABEL_0", "box": {"xmin": 159, "ymin": 120, "xmax": 480, "ymax": 359}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

        outputs = object_detector(
            [
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                "http://images.cocodataset.org/val2017/000000039769.jpg",
            ]
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
                [
                    {"score": 0.9982, "label": "remote", "box": {"xmin": 40, "ymin": 70, "xmax": 175, "ymax": 117}},
                    {"score": 0.9960, "label": "remote", "box": {"xmin": 333, "ymin": 72, "xmax": 368, "ymax": 187}},
                    {"score": 0.9955, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 639, "ymax": 473}},
                    {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                    {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
                ],
            ],
        )

    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9988, "label": "cat", "box": {"xmin": 13, "ymin": 52, "xmax": 314, "ymax": 470}},
                {"score": 0.9987, "label": "cat", "box": {"xmin": 345, "ymin": 23, "xmax": 640, "ymax": 368}},
            ],
        )

    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)

        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
                {"score": 0.9993, "label": "I-ANSWER", "box": {"xmin": 294, "ymin": 254, "xmax": 343, "ymax": 264}},
            ],
        )
| 11 |
import unittest

from diffusers.pipelines.pipeline_utils import is_safetensors_compatible


class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
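# Summary of the invariant exercised above (descriptive note, not from the original
# file): a checkpoint is reported safetensors-compatible only when every torch
# ".bin" weight has a ".safetensors" counterpart for the same component, and when a
# `variant` such as "fp16" is requested, the non-variant safetensors filenames are
# accepted as a fallback.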
| 63 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/config.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/config.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/config.json'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/config.json',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/config.json'
),
'distilbert-base-uncased-finetuned-sst-2-english': (
'https://huggingface.co/distilbert-base-uncased-finetuned-sst-2-english/resolve/main/config.json'
),
}
class DistilBertConfig(PretrainedConfig):
    model_type = "distilbert"
    attribute_map = {
        "hidden_size": "dim",
        "num_attention_heads": "n_heads",
        "num_hidden_layers": "n_layers",
    }

    def __init__(
        self,
        vocab_size=30522,
        max_position_embeddings=512,
        sinusoidal_pos_embds=False,
        n_layers=6,
        n_heads=12,
        dim=768,
        hidden_dim=4 * 768,
        dropout=0.1,
        attention_dropout=0.1,
        activation="gelu",
        initializer_range=0.02,
        qa_dropout=0.1,
        seq_classif_dropout=0.2,
        pad_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.sinusoidal_pos_embds = sinusoidal_pos_embds
        self.n_layers = n_layers
        self.n_heads = n_heads
        self.dim = dim
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation = activation
        self.initializer_range = initializer_range
        self.qa_dropout = qa_dropout
        self.seq_classif_dropout = seq_classif_dropout
        super().__init__(**kwargs, pad_token_id=pad_token_id)


class DistilBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
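# Illustrative use of the `attribute_map` aliasing above (example values assumed,
# not from the original module): generic config names resolve to the
# DistilBERT-specific parameters they are mapped to.
#   config = DistilBertConfig(dim=256, n_layers=3)
#   assert config.hidden_size == 256 and config.num_hidden_layers == 3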
| 12 |
def base16_encode(data: bytes) -> str:
    """Encode the given bytes into base16 (uppercase hexadecimal)."""
    # Turn each byte into its two-digit, zero-padded, uppercase hex form.
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    """Decode the given base16 string back into bytes."""
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits."
        )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters."
        )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
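# Round-trip example (illustrative):
#   base16_encode(b"Hello World!")  -> "48656C6C6F20576F726C6421"
#   base16_decode("48656C6C6F20576F726C6421")  -> b"Hello World!"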
| 63 | 0 |
test_graph_1 = {0: [2, 3], 1: [0], 2: [1], 3: [4], 4: []}
test_graph_2 = {0: [1, 2, 3], 1: [2], 2: [0], 3: [4], 4: [5], 5: [3]}


def topology_sort(graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Depth-first search from `vert`, collecting vertices in post-order."""
    visited[vert] = True
    order = []
    for neighbour in graph[vert]:
        if not visited[neighbour]:
            order += topology_sort(graph, neighbour, visited)
    order.append(vert)
    return order


def find_components(reversed_graph: dict[int, list[int]], vert: int, visited: list[bool]) -> list[int]:
    """Collect every vertex reachable from `vert` in the reversed graph."""
    visited[vert] = True
    component = [vert]
    for neighbour in reversed_graph[vert]:
        if not visited[neighbour]:
            component += find_components(reversed_graph, neighbour, visited)
    return component


def strongly_connected_components(graph: dict[int, list[int]]) -> list[list[int]]:
    """Kosaraju's algorithm: DFS post-order on `graph`, then DFS on the reversed graph."""
    visited = len(graph) * [False]
    reversed_graph: dict[int, list[int]] = {vert: [] for vert in range(len(graph))}

    for vert, neighbours in graph.items():
        for neighbour in neighbours:
            reversed_graph[neighbour].append(vert)

    order = []
    for i, was_visited in enumerate(visited):
        if not was_visited:
            order += topology_sort(graph, i, visited)

    components_list = []
    visited = len(graph) * [False]

    for i in range(len(graph)):
        vert = order[len(graph) - i - 1]
        if not visited[vert]:
            component = find_components(reversed_graph, vert, visited)
            components_list.append(component)

    return components_list
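# Small demo (illustrative) using the example graphs defined at the top:
# test_graph_1 decomposes into the components {0, 1, 2}, {3} and {4}, while
# test_graph_2 decomposes into {0, 1, 2} and {3, 4, 5}.
if __name__ == "__main__":
    print(strongly_connected_components(test_graph_1))
    print(strongly_connected_components(test_graph_2))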
| 13 |
from copy import deepcopy

import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader

from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed


def check_model_parameters(model_a, model_b, did_step, iteration):
    for param, grad_param in zip(model_a.parameters(), model_b.parameters()):
        if not param.requires_grad:
            continue
        if not did_step:
            # Grads should not be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is False
            ), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
        else:
            # Grads should be in sync
            assert (
                torch.allclose(param.grad, grad_param.grad) is True
            ), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"


def step_model(model, input, target, accelerator, do_backward=False):
    model.train()
    output = model(input)
    loss = F.mse_loss(output, target.to(output.device))
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss)


def get_training_setup(accelerator, sched=False):
    "Returns everything needed to perform basic training"
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=80)
    dataloader = DataLoader(dset, batch_size=16)
    model.to(accelerator.device)
    if sched:
        opt = AdamW(params=model.parameters(), lr=1e-3)
        ddp_opt = AdamW(params=ddp_model.parameters(), lr=1e-3)
        sched = LambdaLR(opt, lr_lambda=lambda epoch: epoch**0.65)
        ddp_sched = LambdaLR(ddp_opt, lr_lambda=lambda epoch: epoch**0.65)
    # Make a copy of `model`
    if sched:
        ddp_model, ddp_opt, ddp_sched, dataloader = accelerator.prepare(ddp_model, ddp_opt, ddp_sched, dataloader)
    else:
        ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader


def test_noop_sync(accelerator):
    # Test when on a single CPU or GPU that the context manager does nothing
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model, ddp_model, True, iteration)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad, ddp_param.grad
            ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_distributed_sync(accelerator):
    # Test on distributed setup that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    # Use a single batch
    ddp_input, ddp_target = next(iter(dataloader)).values()
    for iteration in range(3):
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator)
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model):
                step_model(ddp_model, ddp_input, ddp_target, accelerator)
        else:
            # Sync grads
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]


def test_gradient_accumulation(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, ddp_model, dataloader = get_training_setup(accelerator)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        step_model(model, input, target, accelerator, False)
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)

        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters(), ddp_model.parameters()):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is True
                ), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad, ddp_param.grad) is False
                ), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"

        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
        ddp_input = ddp_input[torch.randperm(len(ddp_input))]
    GradientState._reset_state()


def test_gradient_accumulation_with_opt_and_scheduler(split_batches=False, dispatch_batches=False):
    accelerator = Accelerator(
        split_batches=split_batches, dispatch_batches=dispatch_batches, gradient_accumulation_steps=2
    )
    # Test that context manager behaves properly
    model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched = get_training_setup(accelerator, True)
    for iteration, batch in enumerate(dataloader):
        ddp_input, ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input, target = accelerator.gather((ddp_input, ddp_target))
        input, target = input.to(accelerator.device), target.to(accelerator.device)
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model, input, target, accelerator, False)
        opt.step()

        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader)):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model):
            step_model(ddp_model, ddp_input, ddp_target, accelerator)
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()

        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), f'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader))
        if accelerator.num_processes > 1:
            check_model_parameters(model, ddp_model, did_step, iteration)
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration)
    GradientState._reset_state()


def test_dataloader_break():
    accelerator = Accelerator()

    first_dset = RegressionDataset(length=80)
    first_dataloader = DataLoader(first_dset, batch_size=16)
    second_dset = RegressionDataset(length=96)
    second_dataloader = DataLoader(second_dset, batch_size=16)
    first_dataloader, second_dataloader = accelerator.prepare(first_dataloader, second_dataloader)

    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader):
        assert id(accelerator.gradient_state.active_dataloader) == id(first_dataloader)
        if iteration < len(first_dataloader) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader):
                    assert id(accelerator.gradient_state.active_dataloader) == id(second_dataloader)
                    if batch_num < len(second_dataloader) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None


def main():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**")
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**")
        test_noop_sync(accelerator)
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**")
        test_distributed_sync(accelerator)
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation(split_batch, dispatch_batches)

    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<", "2.0") or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                "`split_batches=False`, `dispatch_batches=False`**",
            )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, ",
                        f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**",
                    )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch, dispatch_batches)


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
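# Launch note (illustrative): the distributed branches above are only exercised
# when the script runs under a multi-process launcher, e.g.
#   accelerate launch --num_processes 2 test_sync.py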
| 63 | 0 |
_lowerCamelCase : Tuple = """
# Transformers 설치 방법
! pip install transformers datasets
# 마지막 릴리스 대신 소스에서 설치하려면, 위 명령을 주석으로 바꾸고 아래 명령을 해제하세요.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
_lowerCamelCase : Tuple = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
_lowerCamelCase : Any = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 14 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/trocr-base-handwritten": (
        "https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
    ),
    # See all TrOCR models at https://huggingface.co/models?filter=trocr
}


class TrOCRConfig(PretrainedConfig):
    model_type = "trocr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "num_attention_heads": "decoder_attention_heads",
        "hidden_size": "d_model",
        "num_hidden_layers": "decoder_layers",
    }

    def __init__(
        self,
        vocab_size=50265,
        d_model=1024,
        decoder_layers=12,
        decoder_attention_heads=16,
        decoder_ffn_dim=4096,
        activation_function="gelu",
        max_position_embeddings=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        decoder_start_token_id=2,
        init_std=0.02,
        decoder_layerdrop=0.0,
        use_cache=True,
        scale_embedding=False,
        use_learned_position_embeddings=True,
        layernorm_embedding=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
| 63 | 0 |
import unittest

from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


SAMPLE_VOCAB = get_tests_dir("fixtures/spiece.model")


@require_sentencepiece
@require_tokenizers
class AlbertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def get_input_output_texts(self, tokenizer):
        input_text = "this is a test"
        output_text = "this is a test"
        return input_text, output_text

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<pad>")
        self.assertEqual(vocab_keys[1], "<unk>")
        self.assertEqual(vocab_keys[-1], "▁eloquent")
        self.assertEqual(len(vocab_keys), 30000)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 30000)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    def test_full_tokenizer(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB, keep_accents=True)

        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁this", "▁is", "▁a", "▁test"])

        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [48, 25, 21, 1289])

        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens, ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "é", "."]
        )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [31, 23, 386, 19, 561, 3050, 15, 17, 48, 25, 8256, 18, 1, 9])

        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            ["▁i", "▁was", "▁born", "▁in", "▁9", "2000", ",", "▁and", "▁this", "▁is", "▁fal", "s", "<unk>", "."],
        )

    def test_sequence_builders(self):
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB)

        text = tokenizer.encode("sequence builders")
        text_2 = tokenizer.encode("multi-sequence build")

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]

    @slow
    def test_tokenizer_integration(self):
__A = {"attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "input_ids": [[2, 2_19_70, 13, 5, 60_92, 1_67, 28, 71_03, 21_53, 6_73, 8, 70_28, 1_20_51, 18, 17, 71_03, 21_53, 6_73, 8, 35_15, 1_86_84, 8, 44_61, 6, 19_27, 2_97, 8, 1_20_60, 26_07, 18, 13, 5, 44_61, 15, 1_05_38, 38, 8, 1_35, 15, 8_22, 58, 15, 9_93, 1_03_63, 15, 14_60, 80_05, 44_61, 15, 9_93, 2_55, 23_28, 9, 9, 9, 6, 26, 11_12, 8_16, 32_60, 13, 5, 1_03, 23_77, 6, 17, 11_12, 8_16, 27_82, 13, 5, 1_03, 1_06_41, 6, 29, 84, 25_12, 24_30, 7_82, 1_86_84, 27_61, 19, 8_08, 24_30, 25_56, 17, 8_55, 14_80, 94_77, 40_91, 1_28, 1_17_12, 15, 71_03, 21_53, 6_73, 17, 2_48_83, 99_90, 9, 3], [2, 1_15_02, 25, 10_06, 20, 7_82, 8, 1_18_09, 8_55, 17_32, 1_93_93, 1_86_67, 37, 3_67, 2_10_18, 69, 18_54, 34, 1_18_60, 1_91_24, 27, 1_56, 2_25, 17, 1_93, 41_41, 19, 65, 91_24, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 22_31, 8_86, 23_85, 1_76_59, 84, 14, 1_67_92, 19_52, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=A ,model_name="albert-base-v2" ,revision="6b6560eaf5ff2e250b00c50f380c5389a9c2d82e" ,)
| 15 |
import argparse
import os
import re


PATH_TO_AUTO_MODULE = "src/transformers/models/auto"


# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(r"[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict")
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping(fname, overwrite: bool = False):
    with open(fname, "r", encoding="utf-8") as f:
        content = f.read()

    lines = content.split("\n")
    new_lines = []
    line_idx = 0
    while line_idx < len(lines):
        if _re_intro_mapping.search(lines[line_idx]) is not None:
            indent = len(re.search(r"^(\s*)\S", lines[line_idx]).groups()[0]) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "("):
                new_lines.append(lines[line_idx])
                line_idx += 1

            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")"):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1]))
                else:
                    blocks.append(lines[line_idx])
                line_idx += 1

            # Sort blocks by their identifiers
            blocks = sorted(blocks, key=lambda x: _re_identifier.search(x).groups()[0])
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx])
            line_idx += 1

    if overwrite:
        with open(fname, "w", encoding="utf-8") as f:
            f.write("\n".join(new_lines))
    elif "\n".join(new_lines) != content:
        return True


def sort_all_auto_mappings(overwrite: bool = False):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE, f) for f in os.listdir(PATH_TO_AUTO_MODULE) if f.endswith(".py")]
    diffs = [sort_auto_mapping(fname, overwrite=overwrite) for fname in fnames]

    if not overwrite and any(diffs):
        failures = [f for f, d in zip(fnames, diffs) if d]
        raise ValueError(
            f"The following files have auto mappings that need sorting: {', '.join(failures)}. Run `make style` to fix"
            " this."
        )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--check_only", action="store_true", help="Whether to only check or fix style.")
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
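# Usage (illustrative; the script location within the repo is assumed):
#   python utils/sort_auto_mappings.py               # rewrite the auto mappings in place
#   python utils/sort_auto_mappings.py --check_only  # only report files that need sorting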
| 63 | 0 |
"""simple docstring"""
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
lowerCAmelCase_ = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
lowerCAmelCase_ = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
lowerCAmelCase_ = BeautifulSoup(res.text, 'html.parser')
lowerCAmelCase_ = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(F'''https://google.com{link.get("href")}''')
| 16 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIG_BIRD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class BigBirdConfig(PretrainedConfig):
    model_type = "big_bird"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu_new",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=4096,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        sep_token_id=66,
        attention_type="block_sparse",
        use_bias=True,
        rescale_embeddings=False,
        block_size=64,
        num_random_blocks=3,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            sep_token_id=sep_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache

        self.rescale_embeddings = rescale_embeddings
        self.attention_type = attention_type
        self.use_bias = use_bias
        self.block_size = block_size
        self.num_random_blocks = num_random_blocks
        self.classifier_dropout = classifier_dropout


class BigBirdOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 63 | 0 |
"""simple docstring"""
import sys
from collections import defaultdict
class _lowerCAmelCase :
"""simple docstring"""
def __init__( self : List[str] ):
__lowercase = []
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Any] ):
return self.node_position[vertex]
def _lowercase ( self : Dict, UpperCAmelCase__ : List[str], UpperCAmelCase__ : str ):
__lowercase = pos
def _lowercase ( self : List[str], UpperCAmelCase__ : int, UpperCAmelCase__ : Dict, UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Any ):
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowercase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowercase = 2 * start + 1
else:
__lowercase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowercase ,__lowercase = heap[smallest_child], positions[smallest_child]
__lowercase ,__lowercase = (
heap[start],
positions[start],
)
__lowercase ,__lowercase = temp, tempa
__lowercase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child], self.get_position(positions[start] ) )
self.set_position(positions[start], UpperCAmelCase__ )
self.top_to_bottom(UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__, UpperCAmelCase__ )
def _lowercase ( self : List[Any], UpperCAmelCase__ : Dict, UpperCAmelCase__ : str, UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : Tuple ):
__lowercase = position[index]
while index != 0:
__lowercase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowercase = heap[parent]
__lowercase = position[parent]
self.set_position(position[parent], UpperCAmelCase__ )
else:
__lowercase = val
__lowercase = temp
self.set_position(UpperCAmelCase__, UpperCAmelCase__ )
break
__lowercase = parent
else:
__lowercase = val
__lowercase = temp
self.set_position(UpperCAmelCase__, 0 )
def _lowercase ( self : Optional[int], UpperCAmelCase__ : Union[str, Any], UpperCAmelCase__ : Any ):
__lowercase = len(UpperCAmelCase__ ) // 2 - 1
for i in range(UpperCAmelCase__, -1, -1 ):
self.top_to_bottom(UpperCAmelCase__, UpperCAmelCase__, len(UpperCAmelCase__ ), UpperCAmelCase__ )
def _lowercase ( self : Union[str, Any], UpperCAmelCase__ : Optional[Any], UpperCAmelCase__ : List[Any] ):
__lowercase = positions[0]
__lowercase = sys.maxsize
self.top_to_bottom(UpperCAmelCase__, 0, len(UpperCAmelCase__ ), UpperCAmelCase__ )
return temp
def _A ( UpperCamelCase_ : Dict) -> Optional[Any]:
'''simple docstring'''
__lowercase = Heap()
__lowercase = [0] * len(UpperCamelCase_)
__lowercase = [-1] * len(UpperCamelCase_) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__lowercase = [] # Heap of Distance of vertices from their neighboring vertex
__lowercase = []
for vertex in range(len(UpperCamelCase_)):
distance_tv.append(sys.maxsize)
positions.append(UpperCamelCase_)
heap.node_position.append(UpperCamelCase_)
__lowercase = []
__lowercase = 1
__lowercase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowercase = 0
__lowercase = distance
heap.heapify(UpperCamelCase_, UpperCamelCase_)
for _ in range(1, len(UpperCamelCase_)):
__lowercase = heap.delete_minimum(UpperCamelCase_, UpperCamelCase_)
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex))
__lowercase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase_)]
):
__lowercase = distance
heap.bottom_to_top(
UpperCamelCase_, heap.get_position(UpperCamelCase_), UpperCamelCase_, UpperCamelCase_)
__lowercase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_a = int(input('Enter number of edges: ').strip())
_a = defaultdict(list)
for _ in range(edges_number):
_a = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
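# Example session (illustrative, traced by hand), with each edge entered as "u v weight":
#   Enter number of edges: 3
#   0 1 2
#   1 2 3
#   0 2 10
# should print [(0, 1), (1, 2)] - the spanning tree avoids the weight-10 edge.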
| 17 |
import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin


class UnCLIPTextProjModel(ModelMixin, ConfigMixin):
    """
    Utility class for CLIP embeddings. Used to combine the image and text embeddings into a format usable by the
    decoder.
    """

    @register_to_config
    def __init__(
        self,
        *,
        clip_extra_context_tokens: int = 4,
        clip_embeddings_dim: int = 768,
        time_embed_dim: int,
        cross_attention_dim,
    ):
        super().__init__()

        self.learned_classifier_free_guidance_embeddings = nn.Parameter(torch.zeros(clip_embeddings_dim))

        # parameters for additional clip time embeddings
        self.embedding_proj = nn.Linear(clip_embeddings_dim, time_embed_dim)
        self.clip_image_embeddings_project_to_time_embeddings = nn.Linear(clip_embeddings_dim, time_embed_dim)

        # parameters for encoder hidden states
        self.clip_extra_context_tokens = clip_extra_context_tokens
        self.clip_extra_context_tokens_proj = nn.Linear(
            clip_embeddings_dim, self.clip_extra_context_tokens * cross_attention_dim
        )
        self.encoder_hidden_states_proj = nn.Linear(clip_embeddings_dim, cross_attention_dim)
        self.text_encoder_hidden_states_norm = nn.LayerNorm(cross_attention_dim)

    def forward(self, *, image_embeddings, prompt_embeds, text_encoder_hidden_states, do_classifier_free_guidance):
        if do_classifier_free_guidance:
            # Add the classifier free guidance embeddings to the image embeddings
            image_embeddings_batch_size = image_embeddings.shape[0]
            classifier_free_guidance_embeddings = self.learned_classifier_free_guidance_embeddings.unsqueeze(0)
            classifier_free_guidance_embeddings = classifier_free_guidance_embeddings.expand(
                image_embeddings_batch_size, -1
            )
            image_embeddings = torch.cat([classifier_free_guidance_embeddings, image_embeddings], dim=0)

        # The image embeddings batch size and the text embeddings batch size are equal
        assert image_embeddings.shape[0] == prompt_embeds.shape[0]

        batch_size = prompt_embeds.shape[0]

        # "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
        # adding CLIP embeddings to the existing timestep embedding, ...
        time_projected_prompt_embeds = self.embedding_proj(prompt_embeds)
        time_projected_image_embeddings = self.clip_image_embeddings_project_to_time_embeddings(image_embeddings)
        additive_clip_time_embeddings = time_projected_image_embeddings + time_projected_prompt_embeds

        # ... and by projecting CLIP embeddings into four
        # extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
        clip_extra_context_tokens = self.clip_extra_context_tokens_proj(image_embeddings)
        clip_extra_context_tokens = clip_extra_context_tokens.reshape(batch_size, -1, self.clip_extra_context_tokens)
        clip_extra_context_tokens = clip_extra_context_tokens.permute(0, 2, 1)

        text_encoder_hidden_states = self.encoder_hidden_states_proj(text_encoder_hidden_states)
        text_encoder_hidden_states = self.text_encoder_hidden_states_norm(text_encoder_hidden_states)
        text_encoder_hidden_states = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states], dim=1)

        return text_encoder_hidden_states, additive_clip_time_embeddings
| 63 | 0 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_flax_available,
    is_sentencepiece_available,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
)


_import_structure = {
    "configuration_xlm_roberta": [
        "XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "XLMRobertaConfig",
        "XLMRobertaOnnxConfig",
    ],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta"] = ["XLMRobertaTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_xlm_roberta_fast"] = ["XLMRobertaTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_xlm_roberta"] = [
        "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "XLMRobertaForCausalLM",
        "XLMRobertaForMaskedLM",
        "XLMRobertaForMultipleChoice",
        "XLMRobertaForQuestionAnswering",
        "XLMRobertaForSequenceClassification",
        "XLMRobertaForTokenClassification",
        "XLMRobertaModel",
        "XLMRobertaPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_xlm_roberta"] = [
        "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFXLMRobertaForCausalLM",
        "TFXLMRobertaForMaskedLM",
        "TFXLMRobertaForMultipleChoice",
        "TFXLMRobertaForQuestionAnswering",
        "TFXLMRobertaForSequenceClassification",
        "TFXLMRobertaForTokenClassification",
        "TFXLMRobertaModel",
        "TFXLMRobertaPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_xlm_roberta"] = [
        "FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "FlaxXLMRobertaForMaskedLM",
        "FlaxXLMRobertaForCausalLM",
        "FlaxXLMRobertaForMultipleChoice",
        "FlaxXLMRobertaForQuestionAnswering",
        "FlaxXLMRobertaForSequenceClassification",
        "FlaxXLMRobertaForTokenClassification",
        "FlaxXLMRobertaModel",
        "FlaxXLMRobertaPreTrainedModel",
    ]


if TYPE_CHECKING:
    from .configuration_xlm_roberta import (
        XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP,
        XLMRobertaConfig,
        XLMRobertaOnnxConfig,
    )

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta import XLMRobertaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_xlm_roberta_fast import XLMRobertaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_xlm_roberta import (
            XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            XLMRobertaForCausalLM,
            XLMRobertaForMaskedLM,
            XLMRobertaForMultipleChoice,
            XLMRobertaForQuestionAnswering,
            XLMRobertaForSequenceClassification,
            XLMRobertaForTokenClassification,
            XLMRobertaModel,
            XLMRobertaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_xlm_roberta import (
            TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFXLMRobertaForCausalLM,
            TFXLMRobertaForMaskedLM,
            TFXLMRobertaForMultipleChoice,
            TFXLMRobertaForQuestionAnswering,
            TFXLMRobertaForSequenceClassification,
            TFXLMRobertaForTokenClassification,
            TFXLMRobertaModel,
            TFXLMRobertaPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_xlm_roberta import (
            FLAX_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
            FlaxXLMRobertaForCausalLM,
            FlaxXLMRobertaForMaskedLM,
            FlaxXLMRobertaForMultipleChoice,
            FlaxXLMRobertaForQuestionAnswering,
            FlaxXLMRobertaForSequenceClassification,
            FlaxXLMRobertaForTokenClassification,
            FlaxXLMRobertaModel,
            FlaxXLMRobertaPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
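# Note on the `_LazyModule` indirection above: importing the package does not pull
# in torch/TF/Flax; each submodule listed in `_import_structure` is only loaded
# when one of its attributes (e.g. `XLMRobertaModel`) is first accessed.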
| 18 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _lowerCamelCase ( lowercase : Dict ) -> Any:
_a = filter(lambda lowercase : p.requires_grad , model.parameters() )
_a = sum([np.prod(p.size() ) for p in model_parameters] )
return params
lowerCAmelCase_ : int = logging.getLogger(__name__)
def _lowerCamelCase ( lowercase : List[Any] , lowercase : Any ) -> Any:
if metric == "rouge2":
_a = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
_a = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
_a = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
F'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
" function." )
_a = ModelCheckpoint(
dirpath=lowercase , filename=lowercase , monitor=F'val_{metric}' , mode="max" , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Optional[int] ) -> Union[str, Any]:
return EarlyStopping(
monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=lowercase , verbose=lowercase , )
class __SCREAMING_SNAKE_CASE (pl.Callback ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] , __a : str , __a : List[Any] ):
_a = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__a )
@rank_zero_only
    def _write_logs ( self , trainer: pl.Trainer , pl_module: pl.LightningModule , type_path: str , save_generations=True ):
        logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / "test_results.txt"
            generations_file = od / "test_generations.txt"
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , "a+" ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = f'{key}: {val:.6f}\n'
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = "\n".join(metrics["preds"] )
            generations_file.open("w+" ).write(content )
@rank_zero_only
    def on_train_start ( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
    def on_test_end ( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , "test" )
@rank_zero_only
    def on_validation_end ( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 63 | 0 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling ( data ):
    return (data["data"], data["target"])
def xgboost ( features , target ):
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main ( ):
    iris = load_iris()
    features , targets = data_handling(iris )
    x_train , x_test , y_train , y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="Blues" , normalize="true" , )
    plt.title("Normalized Confusion Matrix - IRIS Dataset" )
    plt.show()
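# Note (added): load_iris() returns a Bunch whose "data" field is the 150 x 4 feature matrix and
# whose "target" field holds the class labels, so test_size=0.25 reserves roughly 38 samples for
# the confusion-matrix evaluation above.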
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 19 |
'''simple docstring'''
import math
class SelfOrganizingMap :
    """simple docstring"""
    def get_winner ( self , weights : list[list[float]] , sample : list[int] ):
        da = 0.0
        db = 0.0
        for i in range(len(sample ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        return 0 if da > db else 1
    def update ( self , weights : list[list[int | float]] , sample : list[int] , j : int , alpha : float ):
        for i in range(len(weights[j] ) ):
            weights[j][i] += alpha * (sample[i] - weights[j][i])
        return weights
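# Note (added): get_winner accumulates the squared Euclidean distance from the sample to each of
# the two weight rows and picks a cluster index from their comparison; update then moves only the
# winning row toward the sample by a step of alpha.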
def main ( ) -> None:
    # Training Examples ( m, n )
    training_samples = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
    # weight initialization ( n, C )
    weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
    # training
    self_organizing_map = SelfOrganizingMap()
    epochs = 3
    alpha = 0.5
    for _ in range(epochs ):
        for j in range(len(training_samples ) ):
            # training sample
            sample = training_samples[j]
            # Compute the winning vector
            winner = self_organizing_map.get_winner(weights , sample )
            # Update the winning vector
            weights = self_organizing_map.update(weights , sample , winner , alpha )
    # classify test sample
    sample = [0, 0, 0, 1]
    winner = self_organizing_map.get_winner(weights , sample )
    # results
    print(F'Clusters that the test sample belongs to : {winner}' )
    print(F'Weights that have been trained : {weights}' )
# running the main() function
if __name__ == "__main__":
main()
| 63 | 0 |
def jaro_winkler( stra , strb ) -> float:
    def get_matched_characters(_stra , _strb ) -> str:
        matched = []
        limit = min(len(_stra ) , len(_strb ) ) // 2
        for i, l in enumerate(_stra ):
            left = int(max(0 , i - limit ) )
            right = int(min(i + limit + 1 , len(_strb ) ) )
            if l in _strb[left:right]:
                matched.append(l )
                _strb = f"{_strb[0:_strb.index(l )]} {_strb[_strb.index(l ) + 1:]}"
        return "".join(matched )
    # matching characters
    matching_a = get_matched_characters(stra , strb )
    matching_b = get_matched_characters(strb , stra )
    match_count = len(matching_a )
    # transposition
    transpositions = (
        len([(ca, cb) for ca, cb in zip(matching_a , matching_b ) if ca != cb] ) // 2
    )
    if not match_count:
        jaro = 0.0
    else:
        jaro = (
            1
            / 3
            * (
                match_count / len(stra )
                + match_count / len(strb )
                + (match_count - transpositions) / match_count
            )
        )
    # common prefix up to 4 characters
    prefix_len = 0
    for ca, cb in zip(stra[:4] , strb[:4] ):
        if ca == cb:
            prefix_len += 1
        else:
            break
    return jaro + 0.1 * prefix_len * (1 - jaro)
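# Worked check (added): jaro_winkler("hello", "world") matches only "l" in each direction, so
# jaro = (1/5 + 1/5 + 1/1) / 3 = 0.4666... with no common prefix, and the __main__ block below
# prints 0.4666666666666666.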
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler("""hello""", """world"""))
| 20 |
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __SCREAMING_SNAKE_CASE (ProcessorMixin ):
    """simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'OwlViTImageProcessor'
    tokenizer_class = ('CLIPTokenizer', 'CLIPTokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , text=None , images=None , query_images=None , padding="max_length" , return_tensors="np" , **kwargs ):
        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one text or query image or image. All three cannot be none." )
        if text is not None:
            if isinstance(text , str ) or (isinstance(text , List ) and not isinstance(text[0] , List )):
                encodings = [self.tokenizer(text , padding=padding , return_tensors=return_tensors , **kwargs )]
            elif isinstance(text , List ) and isinstance(text[0] , List ):
                encodings = []
                # Maximum number of queries across batch
                max_num_queries = max([len(t ) for t in text] )
                # Pad all batch samples to max number of text queries
                for t in text:
                    if len(t ) != max_num_queries:
                        t = t + [" "] * (max_num_queries - len(t ))
                    encoding = self.tokenizer(t , padding=padding , return_tensors=return_tensors , **kwargs )
                    encodings.append(encoding )
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "jax" and is_flax_available():
                import jax.numpy as jnp
                input_ids = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            elif return_tensors == "pt" and is_torch_available():
                import torch
                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
            elif return_tensors == "tf" and is_tf_available():
                import tensorflow as tf
                input_ids = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
                attention_mask = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
            else:
                raise ValueError("Target return tensor type could not be returned" )
            encoding = BatchEncoding()
            encoding["input_ids"] = input_ids
            encoding["attention_mask"] = attention_mask
        if query_images is not None:
            encoding = BatchEncoding()
            query_pixel_values = self.image_processor(
                query_images , return_tensors=return_tensors , **kwargs ).pixel_values
            encoding["query_pixel_values"] = query_pixel_values
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif query_images is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None or query_images is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def post_process ( self , *args , **kwargs ):
        return self.image_processor.post_process(*args , **kwargs )
    def post_process_object_detection ( self , *args , **kwargs ):
        return self.image_processor.post_process_object_detection(*args , **kwargs )
    def post_process_image_guided_detection ( self , *args , **kwargs ):
        return self.image_processor.post_process_image_guided_detection(*args , **kwargs )
    def batch_decode ( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode ( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def feature_extractor_class ( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor ( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
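# Hedged usage sketch (added; upstream this class is OwlViTProcessor and the checkpoint name is one
# real example, not taken from this file):
#   processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
#   inputs = processor(text=[["a photo of a cat", "a photo of a dog"]], images=image, return_tensors="pt")
# Nested text lists are padded with " " entries to the longest query set, as implemented above.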
| 63 | 0 |
from __future__ import annotations
def median_of_two_arrays( numsa , numsb ) -> float:
    all_numbers = sorted(numsa + numsb )
    div , mod = divmod(len(all_numbers ) , 2 )
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2
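# Worked check (added): median_of_two_arrays([1.0, 2.0], [3.0]) sorts to [1.0, 2.0, 3.0] and
# returns the middle value 2.0; median_of_two_arrays([1.0, 2.0], [3.0, 4.0]) averages the two
# middle values and returns 2.5.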
if __name__ == "__main__":
import doctest
doctest.testmod()
    array_a = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_b = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(F"The median of two arrays is: {median_of_two_arrays(array_a, array_b)}")
| 21 |
'''simple docstring'''
def harmonic_series ( n_term : str ) -> list:
    if n_term == "":
        return []
    series = []
    for temp in range(int(n_term ) ):
        series.append(F'1/{temp + 1}' if series else "1" )
    return series
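# Worked check (added): harmonic_series("3") returns ['1', '1/2', '1/3']; the first element stays
# "1" because the conditional only switches to the "1/n" form once the list is non-empty.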
if __name__ == "__main__":
    nth_term = input('Enter the last number (nth term) of the Harmonic Series')
    print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
    print(harmonic_series(nth_term))
| 63 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A_ ( ProcessorMixin ):
    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """LayoutLMv3ImageProcessor"""
    tokenizer_class = ("""LayoutLMv3Tokenizer""", """LayoutLMv3TokenizerFast""")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , text_pair = None , boxes = None , word_labels = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
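    # Hedged note (added): when the image processor runs OCR (apply_ocr=True), the words and boxes
    # it extracts are forwarded to the tokenizer automatically, which is why passing explicit boxes
    # or word_labels together with OCR is rejected at the top of __call__.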
    def get_overflowing_images ( self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}' )
        return images_with_overflow
    def batch_decode ( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode ( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names ( self ):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class ( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor ( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 22 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args ():
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU." )
    parser.add_argument(
        "--pretrained_model_config" , type=str , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
    parser.add_argument(
        "--tokenizer" , type=str , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
    parser.add_argument(
        "--per_replica_batch_size" , type=int , default=8 , help="Batch size per TPU core." , )
    parser.add_argument(
        "--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
    parser.add_argument(
        "--tpu_name" , type=str , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
    parser.add_argument(
        "--tpu_zone" , type=str , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
    parser.add_argument(
        "--gcp_project" , type=str , help="Google cloud project name. Only used for non-Colab TPU nodes." )
    parser.add_argument(
        "--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
    parser.add_argument(
        "--train_dataset" , type=str , help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket." , )
    parser.add_argument(
        "--shuffle_buffer_size" , type=int , default=2**18 , help="Size of the shuffle buffer (in samples)" , )
    parser.add_argument(
        "--eval_dataset" , type=str , help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=1 , help="Number of epochs to train for." , )
    parser.add_argument(
        "--learning_rate" , type=float , default=1E-4 , help="Learning rate to use for training." , )
    parser.add_argument(
        "--weight_decay_rate" , type=float , default=1E-3 , help="Weight decay rate to use for training." , )
    parser.add_argument(
        "--max_length" , type=int , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
    parser.add_argument(
        "--mlm_probability" , type=float , default=0.15 , help="Fraction of tokens to mask during training." , )
    parser.add_argument("--output_dir" , type=str , required=True , help="Path to save model checkpoints to." )
    parser.add_argument("--hub_model_id" , type=str , help="Model ID to upload to on the Hugging Face Hub." )
    args = parser.parse_args()
    return args
def _lowerCamelCase ( lowercase : Union[str, Any] ) -> Optional[int]:
try:
if args.tpu_name:
_a = tf.distribute.cluster_resolver.TPUClusterResolver(
args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
else:
_a = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
raise RuntimeError(
"Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
"--gcp_project. When running on a TPU VM, use --tpu_name local." )
tf.config.experimental_connect_to_cluster(lowercase )
tf.tpu.experimental.initialize_tpu_system(lowercase )
return tpu
def count_samples ( file_list ):
    num_samples = 0
    for file in file_list:
        filename = file.split("/" )[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord" , filename ).group(1 )
        sample_count = int(sample_count )
        num_samples += sample_count
    return num_samples
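# Illustrative example (added; the shard name is hypothetical): for a file named
# "wikitext-00001-52881.tfrecord", the regex above captures "52881" as that shard's sample count.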
def prepare_dataset ( records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(dataset ) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn , num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(args.shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=AUTO )
    dataset = dataset.prefetch(AUTO )
    return dataset
def main ( args ):
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0" )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) )
    if not training_records:
        raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' )
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) )
    if not eval_records:
        raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' )
    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule = create_optimizer(
            num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer , metrics=["accuracy"] )
    def decode_fn(example ):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example , features )
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors="tf" )
    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"] , tf.bool )
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"] , batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    eval_dataset = prepare_dataset(
        eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
    model.fit(
        train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
    model.save_pretrained(args.output_dir )
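# Hedged note (added): mask_with_collator is applied through dataset.map(...) inside
# prepare_dataset, so the MLM masking (probability args.mlm_probability) runs as part of the
# tf.data input pipeline instead of inside the training loop.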
if __name__ == "__main__":
    args = parse_args()
    main(args)
| 63 | 0 |
'''simple docstring'''
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
pytestmark = pytest.mark.integration
@pytest.mark.parametrize('''path''' , ['''paws''', '''csv'''] )
def test_inspect_dataset ( path , tmp_path ):
    inspect_dataset(path , tmp_path )
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' , ['''accuracy'''] )
def test_inspect_metric ( path , tmp_path ):
    inspect_metric(path , tmp_path )
    script_name = path + '''.py'''
    assert script_name in os.listdir(tmp_path )
    assert "__pycache__" not in os.listdir(tmp_path )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_config_info ( path , config_name , expected_splits ):
    info = get_dataset_config_info(path , config_name=config_name )
    assert info.config_name == config_name
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_config_info_error ( path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_config_info(path , config_name=config_name )
@pytest.mark.parametrize(
'''path, expected''' , [
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] , )
def test_get_dataset_config_names ( path , expected ):
    config_names = get_dataset_config_names(path )
    assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' , [
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_infos ( path , expected_configs , expected_splits_in_first_config ):
    infos = get_dataset_infos(path )
    assert list(infos.keys() ) == expected_configs
    expected_config = expected_configs[0]
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' , [
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] , )
def test_get_dataset_info ( path , expected_config , expected_splits ):
    infos = get_dataset_infos(path )
    assert expected_config in infos
    info = infos[expected_config]
    assert info.config_name == expected_config
    assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' , [
('''paws''', None, ValueError),
] , )
def test_get_dataset_split_names_error ( path , config_name , expected_exception ):
    with pytest.raises(expected_exception ):
        get_dataset_split_names(path , config_name=config_name )
| 23 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __SCREAMING_SNAKE_CASE (ProcessorMixin ):
    """simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text = None , text_pair = None , boxes = None , word_labels = None , add_special_tokens = True , padding = False , truncation = None , max_length = None , stride = 0 , pad_to_multiple_of = None , return_token_type_ids = None , return_attention_mask = None , return_overflowing_tokens = False , return_special_tokens_mask = False , return_offsets_mapping = False , return_length = False , verbose = True , return_tensors = None , **kwargs , ):
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
    def get_overflowing_images ( self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}' )
        return images_with_overflow
    def batch_decode ( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode ( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names ( self ):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class ( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor ( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
| 63 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import is_speech_available, is_vision_available
from transformers.testing_utils import require_torch
if is_vision_available():
from transformers import TvltImageProcessor
if is_speech_available():
from transformers import TvltFeatureExtractor
from transformers import TvltProcessor
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
    def setUp (self ):
        """simple docstring"""
        self.checkpoint = '''ZinengTang/tvlt-base'''
        self.tmpdirname = tempfile.mkdtemp()
    def get_image_processor (self , **kwargs ):
        """simple docstring"""
        return TvltImageProcessor.from_pretrained(self.checkpoint , **kwargs )
    def get_feature_extractor (self , **kwargs ):
        """simple docstring"""
        return TvltFeatureExtractor.from_pretrained(self.checkpoint , **kwargs )
    def tearDown (self ):
        """simple docstring"""
        shutil.rmtree(self.tmpdirname )
    def test_save_load_pretrained_default (self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        processor.save_pretrained(self.tmpdirname )
        processor = TvltProcessor.from_pretrained(self.tmpdirname )
        self.assertIsInstance(processor.feature_extractor , TvltFeatureExtractor )
        self.assertIsInstance(processor.image_processor , TvltImageProcessor )
    def test_feature_extractor (self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_2000] )
        audio_dict = feature_extractor(audio , return_tensors='''np''' )
        input_processor = processor(audio=audio , return_tensors='''np''' )
        for key in audio_dict.keys():
            self.assertAlmostEqual(audio_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_image_processor (self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        images = np.ones([3, 224, 224] )
        image_dict = image_processor(images , return_tensors='''np''' )
        input_processor = processor(images=images , return_tensors='''np''' )
        for key in image_dict.keys():
            self.assertAlmostEqual(image_dict[key].sum() , input_processor[key].sum() , delta=1E-2 )
    def test_processor (self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        audio = np.ones([1_2000] )
        images = np.ones([3, 224, 224] )
        inputs = processor(audio=audio , images=images )
        self.assertListEqual(list(inputs.keys() ) , ['''audio_values''', '''audio_mask''', '''pixel_values''', '''pixel_mask'''] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
    def test_model_input_names (self ):
        """simple docstring"""
        image_processor = self.get_image_processor()
        feature_extractor = self.get_feature_extractor()
        processor = TvltProcessor(image_processor=image_processor , feature_extractor=feature_extractor )
        self.assertListEqual(
            processor.model_input_names , image_processor.model_input_names + feature_extractor.model_input_names , msg='''`processor` and `image_processor`+`feature_extractor` model input names do not match''' , )
| 24 |
'''simple docstring'''
from ....utils import logging
lowerCAmelCase_ : Union[str, Any] = logging.get_logger(__name__)
class __SCREAMING_SNAKE_CASE :
    """simple docstring"""
    def __init__( self , config , num_labels=None , modal_hidden_size=20_48 ):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
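# Note (added): copying config.__dict__ wholesale makes this wrapper expose every attribute of the
# wrapped text config while layering the multimodal fields (modal_hidden_size and, optionally,
# num_labels) on top of it.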
| 63 | 0 |
"""simple docstring"""
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester :
    """simple docstring"""
    def __init__(self , parent , batch_size=13 , seq_length=7 , is_training=False , use_input_mask=True , use_token_type_ids=False , use_labels=True , vocab_size=33 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs (self ):
        """simple docstring"""
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config (self ):
"""simple docstring"""
return EsmConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , pad_token_id=1 , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , )
    def create_and_check_model (self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = EsmModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask )
        result = model(input_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def create_and_check_for_masked_lm (self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        model = EsmForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_token_classification (self , config , input_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        """simple docstring"""
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common (self ):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""
    test_mismatched_shapes = False
    all_model_classes = (
(
EsmForMaskedLM,
EsmModel,
EsmForSequenceClassification,
EsmForTokenClassification,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = ()
    pipeline_model_mapping = (
{
'''feature-extraction''': EsmModel,
'''fill-mask''': EsmForMaskedLM,
'''text-classification''': EsmForSequenceClassification,
'''token-classification''': EsmForTokenClassification,
'''zero-shot''': EsmForSequenceClassification,
}
if is_torch_available()
else {}
)
    test_sequence_classification_problem_types = True
    def setUp (self ):
        """simple docstring"""
        self.model_tester = EsmModelTester(self )
        self.config_tester = ConfigTester(self , config_class=EsmConfig , hidden_size=37 )
    def test_config (self ):
"""simple docstring"""
self.config_tester.run_common_tests()
    def test_model (self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings (self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm (self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_token_classification (self ):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_model_from_pretrained (self ):
        """simple docstring"""
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    def test_create_position_ids_respects_padding_index (self ):
        """simple docstring"""
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config )
        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]] )
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ] )
        position_ids = create_position_ids_from_input_ids(input_ids , model.padding_idx )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
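    # Note (added): ESM borrows RoBERTa-style position numbering - real tokens are numbered from
    # padding_idx + 1 upward while pad positions keep padding_idx itself, which is exactly what
    # the expected tensor in the test above encodes.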
    def test_create_position_ids_from_inputs_embeds (self ):
        """simple docstring"""
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config )
        inputs_embeds = torch.empty(2 , 4 , 30 )
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions] )
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds )
        self.assertEqual(position_ids.shape , expected_positions.shape )
        self.assertTrue(torch.all(torch.eq(position_ids , expected_positions ) ) )
@unittest.skip("""Esm does not support embedding resizing""" )
    def test_resize_embeddings_untied (self ):
"""simple docstring"""
pass
@unittest.skip("""Esm does not support embedding resizing""" )
    def test_resize_tokens_embeddings (self ):
"""simple docstring"""
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
    def test_model_is_small (self ):
"""simple docstring"""
pass
@require_torch
class EsmModelIntegrationTest (TestCasePlus ):
    """simple docstring"""
@slow
    def test_inference_masked_lm (self ):
        """simple docstring"""
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]] )
            output = model(input_ids )[0]
            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size) )
            self.assertEqual(output.shape , expected_shape )
            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
@slow
    def test_inference_no_head (self ):
        """simple docstring"""
        with torch.no_grad():
            model = EsmModel.from_pretrained("""facebook/esm2_t6_8M_UR50D""" )
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
            output = model(input_ids )[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]] )
            self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 25 |
'''simple docstring'''
def solution ( n : int = 100 ) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
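# Worked check (added): for n = 10 the square of the sum is 55**2 = 3025 and the sum of squares is
# 385, so solution(10) returns 2640; Project Euler problem 6 asks the same question for n = 100.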
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63 | 0 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
class TextClassificationToolTester ( unittest.TestCase , ToolTesterMixin ):
    def setUp ( self ):
        self.tool = load_tool("""text-classification""" )
        self.tool.setup()
        self.remote_tool = load_tool("""text-classification""" , remote=True )
    def test_exact_match_arg ( self ):
        result = self.tool("""That's quite cool""" , ["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )
    def test_exact_match_arg_remote ( self ):
        result = self.remote_tool("""That's quite cool""" , ["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )
    def test_exact_match_kwarg ( self ):
        result = self.tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )
    def test_exact_match_kwarg_remote ( self ):
        result = self.remote_tool(text="""That's quite cool""" , labels=["""positive""", """negative"""] )
        self.assertEqual(result , """positive""" )
| 26 |
'''simple docstring'''
def is_palindrome ( num : int ) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
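# Worked check (added): is_palindrome(121) rebuilds 121 from its digits and returns True, while
# is_palindrome(123) reverses to 321 and returns False; negative inputs return False immediately.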
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 0 |
'''simple docstring'''
from typing import Any
class __UpperCamelCase :
    def __init__( self , data ):
        '''simple docstring'''
        self.data = data
        self.next = None
def __repr__( self ):
'''simple docstring'''
return f"""Node({self.data})"""
class __UpperCamelCase :
    def __init__( self ):
        '''simple docstring'''
        self.head = None
    def __iter__( self ):
        '''simple docstring'''
        node = self.head
        while node:
            yield node.data
            node = node.next
def __len__( self ):
'''simple docstring'''
return sum(1 for _ in self )
def __repr__( self ):
'''simple docstring'''
return "->".join([str(__a ) for item in self] )
    def __getitem__( self , index ):
        '''simple docstring'''
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__( self , index , data ):
        '''simple docstring'''
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail ( self , data ):
        '''simple docstring'''
        self.insert_nth(len(self ) , data )
    def insert_head ( self , data ):
        '''simple docstring'''
        self.insert_nth(0 , data )
    def insert_nth ( self , index , data ):
        '''simple docstring'''
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list ( self ):  # print every node data
        '''simple docstring'''
        print(self )
    def delete_head ( self ):
        '''simple docstring'''
        return self.delete_nth(0 )
    def delete_tail ( self ):  # delete from tail
        '''simple docstring'''
        return self.delete_nth(len(self ) - 1 )
    def delete_nth ( self , index = 0 ):
        '''simple docstring'''
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError('List index out of range.' )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty ( self ):
        '''simple docstring'''
        return self.head is None
    def reverse ( self ):
        '''simple docstring'''
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
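# Note (added): reverse() is the classic iterative three-pointer reversal - it walks the list once,
# flipping each node's next pointer, so it runs in O(n) time with O(1) extra space.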
def test_singly_linked_list ():
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
        linked_list[i] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2 ():
    test_input = [
-9,
100,
Node(77_345_112 ),
'dlrow olleH',
7,
5_555,
0,
-1_9_2.5_5_5_5_5,
'Hello, world!',
7_7.9,
Node(10 ),
None,
None,
1_2.2_0,
]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
# Check if it's empty or not
assert linked_list.is_empty() is False
assert (
str(_SCREAMING_SNAKE_CASE ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
"-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the head
    result = linked_list.delete_head()
assert result == -9
assert (
str(_SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None->12.2"
)
# Delete the tail
    result = linked_list.delete_tail()
assert result == 1_2.2
assert (
str(_SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None->None"
)
# Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
assert result is None
assert (
str(_SCREAMING_SNAKE_CASE ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
"Hello, world!->77.9->Node(10)->None"
)
# Add a Node instance to its head
linked_list.insert_head(Node('Hello again, world!' ) )
assert (
str(_SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
)
# Add None to its tail
linked_list.insert_tail(_SCREAMING_SNAKE_CASE )
assert (
str(_SCREAMING_SNAKE_CASE )
== "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
"7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
)
# Reverse the linked list
linked_list.reverse()
assert (
str(_SCREAMING_SNAKE_CASE )
== "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
"7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
)
def main():
    from doctest import testmod

    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(F"""Element at Position 1: {linked_list[1]}""")
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(F"""length of linked_list is : {len(linked_list)}""")
if __name__ == "__main__":
main()
| 27 |
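For reference, the pointer rotation that reverse() performs above, reduced to a minimal self-contained sketch; MiniNode is a hypothetical stand-in, not the Node class from the file.

# Minimal sketch of iterative in-place reversal (the prev/current/next_node dance).
class MiniNode:
    def __init__(self, data, next=None):
        self.data = data
        self.next = next

def reverse(head):
    prev = None
    current = head
    while current:
        next_node = current.next  # remember where to go next
        current.next = prev       # flip the pointer
        prev = current
        current = next_node
    return prev  # new head

head = MiniNode(1, MiniNode(2, MiniNode(3)))
head = reverse(head)
assert [head.data, head.next.data, head.next.next.data] == [3, 2, 1]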
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_gpt_neox'] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
lowerCAmelCase_ : List[str] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 63 | 0 |
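The `_import_structure` / `_LazyModule` pattern above defers heavy imports until an attribute is first touched. Below is a minimal sketch of the same idea using PEP 562 module-level `__getattr__`; the stdlib `json` module stands in for a heavy submodule, and the sketch assumes it lives in a module body.

# Lazy-import sketch: nothing in _import_structure is imported until accessed.
import importlib

_import_structure = {"json": ["dumps"]}  # module -> names it provides

def __getattr__(name):
    for module_name, exported in _import_structure.items():
        if name in exported:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")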
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class SpeechToTextTool(PipelineTool):
    """simple docstring"""

    default_checkpoint = """openai/whisper-base"""
    description = (
        """This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the """
        """transcribed text."""
    )
    name = """transcriber"""
    pre_processor_class = WhisperProcessor
    model_class = WhisperForConditionalGeneration
    inputs = ["""audio"""]
    outputs = ["""text"""]

    def encode(self, audio):
        """simple docstring"""
        return self.pre_processor(audio, return_tensors='pt').input_features

    def forward(self, inputs):
        """simple docstring"""
        return self.model.generate(inputs=inputs)

    def decode(self, outputs):
        """simple docstring"""
        return self.pre_processor.batch_decode(outputs, skip_special_tokens=True)[0]
| 28 |
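The tool above wraps the usual processor -> generate -> decode round trip. The same flow written out by hand with the public Whisper API; the silent dummy waveform is only a stand-in for real 16 kHz speech.

# encode -> forward -> decode, done manually.
import numpy as np
from transformers import WhisperForConditionalGeneration, WhisperProcessor

processor = WhisperProcessor.from_pretrained("openai/whisper-base")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")

audio = np.zeros(16_000, dtype=np.float32)  # stand-in for one second of speech
features = processor(audio, sampling_rate=16_000, return_tensors="pt").input_features
predicted_ids = model.generate(inputs=features)
text = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
print(text)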
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCAmelCase_ : Any = get_tests_dir('fixtures')
lowerCAmelCase_ : Union[str, Any] = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
lowerCAmelCase_ : Dict = get_tests_dir('fixtures/dummy-config.json')
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] ):
_a = 0
def UpperCamelCase__ ( self : str ):
_a = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Tuple ):
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : List[Any] ):
with tempfile.TemporaryDirectory() as tmpdirname:
_a = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
_a = AutoFeatureExtractor.from_pretrained(__a ).to_dict()
config_dict.pop("feature_extractor_type" )
_a = WavaVecaFeatureExtractor(**__a )
# save in new folder
model_config.save_pretrained(__a )
config.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
# make sure private variable is not incorrectly saved
_a = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Tuple ):
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def UpperCamelCase__ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
__a , "bert-base is not a local folder and is not a valid model identifier" ):
_a = AutoFeatureExtractor.from_pretrained("bert-base" )
def UpperCamelCase__ ( self : Optional[Any] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
_a = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" )
def UpperCamelCase__ ( self : List[Any] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
_a = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def UpperCamelCase__ ( self : List[Any] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def UpperCamelCase__ ( self : Any ):
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoFeatureExtractor.register(__a , __a )
# Now that the config is registered, it can be used as any other config with the auto-API
_a = CustomFeatureExtractor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
_a = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def UpperCamelCase__ ( self : Tuple ):
class __SCREAMING_SNAKE_CASE (WavaVecaFeatureExtractor ):
    """simple docstring"""

    is_local = True
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# If remote code is not set, the default is to use local
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
_a = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 63 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCAmelCase = logging.get_logger(__name__)
__UpperCAmelCase = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCamelCase (PretrainedConfig ):
    '''simple docstring'''

    model_type = '''visual_bert'''
def __init__(self, vocab_size=30_522, hidden_size=768, visual_embedding_dim=512, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3_072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, bypass_transformer=False, special_visual_initialize=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
    super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
    self.vocab_size = vocab_size
    self.max_position_embeddings = max_position_embeddings
    self.hidden_size = hidden_size
    self.visual_embedding_dim = visual_embedding_dim
    self.num_hidden_layers = num_hidden_layers
    self.num_attention_heads = num_attention_heads
    self.intermediate_size = intermediate_size
    self.hidden_act = hidden_act
    self.hidden_dropout_prob = hidden_dropout_prob
    self.attention_probs_dropout_prob = attention_probs_dropout_prob
    self.initializer_range = initializer_range
    self.type_vocab_size = type_vocab_size
    self.layer_norm_eps = layer_norm_eps
    self.bypass_transformer = bypass_transformer
    self.special_visual_initialize = special_visual_initialize
| 29 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : int = {
'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (PretrainedConfig ):
    """simple docstring"""

    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
def __init__(self, vocab_size=50_257, n_positions=1_024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_pytorch_tanh", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, scale_attn_weights=True, use_cache=True, bos_token_id=50_256, eos_token_id=50_256, attention_softmax_in_fpaa=True, scale_attention_softmax_in_fpaa=True, multi_query=True, **kwargs):
    self.vocab_size = vocab_size
    self.n_positions = n_positions
    self.n_embd = n_embd
    self.n_layer = n_layer
    self.n_head = n_head
    self.n_inner = n_inner
    self.activation_function = activation_function
    self.resid_pdrop = resid_pdrop
    self.embd_pdrop = embd_pdrop
    self.attn_pdrop = attn_pdrop
    self.layer_norm_epsilon = layer_norm_epsilon
    self.initializer_range = initializer_range
    self.scale_attn_weights = scale_attn_weights
    self.use_cache = use_cache
    self.attention_softmax_in_fpaa = attention_softmax_in_fpaa
    self.scale_attention_softmax_in_fpaa = scale_attention_softmax_in_fpaa
    self.multi_query = multi_query
    self.bos_token_id = bos_token_id
    self.eos_token_id = eos_token_id
    super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 63 | 0 |
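A small sketch of what `attribute_map` buys: generic config names resolve to the model-specific ones (assumes the `transformers` package with `GPTBigCodeConfig` is installed).

# Generic aliases resolve through attribute_map.
from transformers import GPTBigCodeConfig

config = GPTBigCodeConfig(n_embd=512, n_layer=6)
assert config.hidden_size == config.n_embd == 512
assert config.num_hidden_layers == config.n_layer == 6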
from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
SquadV1Processor,
SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
| 30 |
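A quick usage sketch for one of the collators imported above: `DataCollatorWithPadding` pads a list of tokenized examples into a rectangular batch at collation time (the model and tokenizer choice here is only illustrative).

# Pad two differently sized examples into one batch.
from transformers import AutoTokenizer, DataCollatorWithPadding

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
collator = DataCollatorWithPadding(tokenizer=tokenizer, return_tensors="pt")
batch = collator([tokenizer("short"), tokenizer("a noticeably longer sentence")])
print(batch["input_ids"].shape)  # both rows padded to the longer length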
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    # round() avoids float error: 27 ** (1 / 3) is 3.0000000000000004
    val = round(n ** (1 / 3))
    return (val * val * val) == n
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 63 | 0 |
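A float-free alternative for the same check, binary-searching the integer cube root instead of trusting `n ** (1 / 3)`:

# Integer-only perfect-cube test via binary search.
def perfect_cube_binary_search(n: int) -> bool:
    if n < 0:
        n = -n  # a negative number is a cube iff its absolute value is
    low, high = 0, n
    while low <= high:
        mid = (low + high) // 2
        if mid**3 == n:
            return True
        if mid**3 < n:
            low = mid + 1
        else:
            high = mid - 1
    return False

assert perfect_cube_binary_search(27) and not perfect_cube_binary_search(4)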
'''simple docstring'''
class SubArray:
    '''simple docstring'''

    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("""please input some numbers:""")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("""the results is:""", re))
| 31 |
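The class above keeps two DP arrays; Kadane's algorithm computes the same maximum contiguous sum with O(1) extra space, shown here as a standalone sketch:

# Kadane's algorithm: maximum contiguous subarray sum in one pass.
def max_sub_array(nums):
    best = current = nums[0]
    for x in nums[1:]:
        current = max(x, current + x)  # extend the current run or restart at x
        best = max(best, current)
    return best

assert max_sub_array([1, -2, 3, 4, -1]) == 7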
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowerCAmelCase_ : Dict = logging.get_logger(__name__)
lowerCAmelCase_ : Optional[int] = {
'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (PretrainedConfig ):
    """simple docstring"""

    model_type = 'deta'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
def __init__(self, backbone_config=None, num_queries=900, max_position_embeddings=2_048, encoder_layers=6, encoder_ffn_dim=2_048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=1_024, decoder_attention_heads=8, encoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, return_intermediate=True, auxiliary_loss=False, position_embedding_type="sine", num_feature_levels=5, encoder_n_points=4, decoder_n_points=4, two_stage=True, two_stage_num_proposals=300, with_box_refine=True, assign_first_stage=True, class_cost=1, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, bbox_loss_coefficient=5, giou_loss_coefficient=2, eos_coefficient=0.1, focal_alpha=0.25, **kwargs):
    if backbone_config is None:
        logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
        backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
    else:
        if isinstance(backbone_config, dict):
            backbone_model_type = backbone_config.pop("model_type")
            config_class = CONFIG_MAPPING[backbone_model_type]
            backbone_config = config_class.from_dict(backbone_config)
    self.backbone_config = backbone_config
    self.num_queries = num_queries
    self.max_position_embeddings = max_position_embeddings
    self.d_model = d_model
    self.encoder_ffn_dim = encoder_ffn_dim
    self.encoder_layers = encoder_layers
    self.encoder_attention_heads = encoder_attention_heads
    self.decoder_ffn_dim = decoder_ffn_dim
    self.decoder_layers = decoder_layers
    self.decoder_attention_heads = decoder_attention_heads
    self.dropout = dropout
    self.attention_dropout = attention_dropout
    self.activation_dropout = activation_dropout
    self.activation_function = activation_function
    self.init_std = init_std
    self.init_xavier_std = init_xavier_std
    self.encoder_layerdrop = encoder_layerdrop
    self.auxiliary_loss = auxiliary_loss
    self.position_embedding_type = position_embedding_type
    # deformable attributes
    self.num_feature_levels = num_feature_levels
    self.encoder_n_points = encoder_n_points
    self.decoder_n_points = decoder_n_points
    self.two_stage = two_stage
    self.two_stage_num_proposals = two_stage_num_proposals
    self.with_box_refine = with_box_refine
    self.assign_first_stage = assign_first_stage
    if two_stage is True and with_box_refine is False:
        raise ValueError("If two_stage is True, with_box_refine must be True.")
    # Hungarian matcher
    self.class_cost = class_cost
    self.bbox_cost = bbox_cost
    self.giou_cost = giou_cost
    # Loss coefficients
    self.mask_loss_coefficient = mask_loss_coefficient
    self.dice_loss_coefficient = dice_loss_coefficient
    self.bbox_loss_coefficient = bbox_loss_coefficient
    self.giou_loss_coefficient = giou_loss_coefficient
    self.eos_coefficient = eos_coefficient
    self.focal_alpha = focal_alpha
    super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

@property
def num_attention_heads(self):
    return self.encoder_attention_heads

@property
def hidden_size(self):
    return self.d_model

def to_dict(self):
    output = copy.deepcopy(self.__dict__)
    output["backbone_config"] = self.backbone_config.to_dict()
    output["model_type"] = self.__class__.model_type
    return output
| 63 | 0 |
from ..utils import DummyObject, requires_backends
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
snake_case__ : List[str] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[Any] , *SCREAMING_SNAKE_CASE__ : Union[str, Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Union[str, Any]:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Union[str, Any]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : str , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : str ) -> Any:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
snake_case__ : Union[str, Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : str , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : Any ) -> Optional[int]:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : List[Any] ) -> Tuple:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[Any] , *SCREAMING_SNAKE_CASE__ : str , **SCREAMING_SNAKE_CASE__ : List[str] ) -> Tuple:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
snake_case__ : Union[str, Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Dict , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Any ) -> Tuple:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : Optional[int] , **SCREAMING_SNAKE_CASE__ : Union[str, Any] ) -> Any:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> List[Any]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
snake_case__ : Tuple = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : List[Any] , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Dict:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : int , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> int:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Dict , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
snake_case__ : str = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : Union[str, Any] , *SCREAMING_SNAKE_CASE__ : int , **SCREAMING_SNAKE_CASE__ : Optional[int] ) -> str:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Dict ) -> int:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Optional[Any] , *SCREAMING_SNAKE_CASE__ : Any , **SCREAMING_SNAKE_CASE__ : Any ) -> Any:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
class SCREAMING_SNAKE_CASE__ ( metaclass=DummyObject ):
snake_case__ : Union[str, Any] = ['''torch''', '''transformers''', '''onnx''']
def __init__( self : List[str] , *SCREAMING_SNAKE_CASE__ : Tuple , **SCREAMING_SNAKE_CASE__ : Tuple ) -> List[Any]:
requires_backends(self , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : Tuple , *SCREAMING_SNAKE_CASE__ : List[str] , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Any:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
@classmethod
def SCREAMING_SNAKE_CASE ( cls : List[str] , *SCREAMING_SNAKE_CASE__ : Optional[Any] , **SCREAMING_SNAKE_CASE__ : Tuple ) -> Dict:
requires_backends(cls , ['torch', 'transformers', 'onnx'] )
| 32 |
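A minimal sketch of the dummy-object pattern used above: a metaclass turns any attribute access on a placeholder class into a helpful ImportError, so a missing optional backend only fails at the point of use (class and backend names here are illustrative, not the library's).

# Any *missing* class attribute (from_pretrained, ...) raises a clear error.
class DummyObject(type):
    def __getattr__(cls, key):
        raise ImportError(f"{cls.__name__} requires the backends: {cls._backends}")

class OnnxThing(metaclass=DummyObject):
    _backends = ["torch", "transformers", "onnx"]

try:
    OnnxThing.from_pretrained("some/model")
except ImportError as err:
    print(err)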
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 63 | 0 |
"""simple docstring"""
def method_1(boundary, steps):
    # "extended trapezoidal rule"
    # int(f) = dx/2 * (f1 + 2f2 + ... + fn)
    h = (boundary[1] - boundary[0]) / steps
    a = boundary[0]
    b = boundary[1]
    x_i = make_points(a, b, h)
    y = 0.0
    y += (h / 2.0) * f(a)
    for i in x_i:
        # print(i)
        y += h * f(i)
    y += (h / 2.0) * f(b)
    return y


def make_points(a, b, h):
    x = a + h
    while x < (b - h):
        yield x
        x = x + h


def f(x):  # enter your function here
    y = (x - 0) * (x - 0)
    return y


def main():
    a = 0.0  # Lower bound of integration
    b = 1.0  # Upper bound of integration
    steps = 10.0  # define number of steps or resolution
    boundary = [a, b]  # define boundary of integration
    y = method_1(boundary, steps)
    print(F'''y = {y}''')
if __name__ == "__main__":
main()
| 33 |
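A quick convergence check for the code above: with f(x) = x**2 on [0, 1] the exact integral is 1/3, and the trapezoidal error should shrink roughly quadratically in the step count (uses method_1 and f as defined above).

# Error against the exact value 1/3 at increasing resolutions.
for steps in (10.0, 100.0, 1000.0):
    approx = method_1([0.0, 1.0], steps)
    print(steps, approx, abs(approx - 1 / 3))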
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : str ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(__a ) )
def UpperCamelCase__ ( self : str ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Any ):
_a = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Any ):
# pass variant but use the non-variant filenames
_a = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Dict ):
_a = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : List[str] ):
# pass variant but use the non-variant filenames
_a = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Optional[int] ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
| 63 | 0 |
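The rule those tests encode, reduced to a deliberately naive sketch: a repo is safetensors-compatible when every component directory that ships a torch `.bin` weight also ships a `.safetensors` one. This ignores variant handling and index files; it is not the library's actual implementation.

# Naive per-directory compatibility check.
import os

def naive_is_safetensors_compatible(filenames):
    dirs_with_bin = {os.path.dirname(f) for f in filenames if f.endswith(".bin")}
    dirs_with_safe = {os.path.dirname(f) for f in filenames if f.endswith(".safetensors")}
    return dirs_with_bin <= dirs_with_safe

assert naive_is_safetensors_compatible(
    ["unet/diffusion_pytorch_model.bin", "unet/diffusion_pytorch_model.safetensors"]
)
assert not naive_is_safetensors_compatible(["unet/diffusion_pytorch_model.bin"])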
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'configuration_whisper': ['WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'WhisperConfig', 'WhisperOnnxConfig'],
'feature_extraction_whisper': ['WhisperFeatureExtractor'],
'processing_whisper': ['WhisperProcessor'],
'tokenization_whisper': ['WhisperTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["tokenization_whisper_fast"] = ['WhisperTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_whisper"] = [
'WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'WhisperForConditionalGeneration',
'WhisperModel',
'WhisperPreTrainedModel',
'WhisperForAudioClassification',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_tf_whisper"] = [
'TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFWhisperForConditionalGeneration',
'TFWhisperModel',
'TFWhisperPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_flax_whisper"] = [
'FlaxWhisperForConditionalGeneration',
'FlaxWhisperModel',
'FlaxWhisperPreTrainedModel',
'FlaxWhisperForAudioClassification',
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
A =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 34 |
'''simple docstring'''
def base16_encode(data: bytes) -> str:
    return "".join([hex(byte)[2:].zfill(2).upper() for byte in list(data)])


def base16_decode(data: str) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(data) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits.")
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data) <= set("0123456789ABCDEF"):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.")
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1], 16) for i in range(0, len(data), 2))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 0 |
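A round-trip check for the two functions above, cross-validated against the stdlib's `base64.b16encode`:

# Encode, compare with the stdlib, then decode back.
import base64

payload = b"Hello World!"
encoded = base16_encode(payload)
assert encoded == base64.b16encode(payload).decode("ascii")
assert base16_decode(encoded) == payload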
'''simple docstring'''
# Function to print upper half of diamond (pyramid)
def floyd(n):
    for i in range(0, n):
        for _ in range(0, n - i - 1):  # printing spaces
            print(" ", end="")
        for _ in range(0, i + 1):  # printing stars
            print("* ", end="")
        print()


def reverse_floyd(n):
    for i in range(n, 0, -1):
        for _ in range(i, 0, -1):  # printing stars
            print("* ", end="")
        print()
        for _ in range(n - i + 1, 0, -1):  # printing spaces
            print(" ", end="")


def pretty_print(n):
    if n <= 0:
        print(" ... .... nothing printing :(")
        return
    floyd(n)  # upper half
    reverse_floyd(n)  # lower half


if __name__ == "__main__":
    print(R"| /\ | |- | |- |--| |\ /| |-")
    print(R"|/ \| |- |_ |_ |__| | \/ | |_")
    K = 1
    while K:
        user_number = int(input("enter the number and , and see the magic : "))
        print()
        pretty_print(user_number)
        K = int(input("press 0 to exit... and 1 to continue..."))
    print("Good Bye...")
| 35 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : Optional[int] , lowercase : Optional[Any] , lowercase : Dict ) -> str:
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def _lowerCamelCase ( lowercase : Optional[Any] , lowercase : int , lowercase : Tuple , lowercase : Optional[int] , lowercase : int=True ) -> Any:
model.train()
_a = model(lowercase )
_a = F.mse_loss(lowercase , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowercase )
def _lowerCamelCase ( lowercase : int , lowercase : Tuple=False ) -> List[str]:
set_seed(42 )
_a = RegressionModel()
_a = deepcopy(lowercase )
_a = RegressionDataset(length=80 )
_a = DataLoader(lowercase , batch_size=16 )
model.to(accelerator.device )
if sched:
_a = AdamW(params=model.parameters() , lr=1E-3 )
_a = AdamW(params=ddp_model.parameters() , lr=1E-3 )
_a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 )
_a = LambdaLR(lowercase , lr_lambda=lambda lowercase : epoch**0.65 )
# Make a copy of `model`
if sched:
_a , _a , _a , _a = accelerator.prepare(lowercase , lowercase , lowercase , lowercase )
else:
_a , _a = accelerator.prepare(lowercase , lowercase )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _lowerCamelCase ( lowercase : Optional[Any] ) -> Optional[int]:
# Test when on a single CPU or GPU that the context manager does nothing
_a , _a , _a = get_training_setup(lowercase )
# Use a single batch
_a , _a = next(iter(lowercase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
else:
# Sync grads
step_model(lowercase , lowercase , lowercase , lowercase )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowercase , lowercase , lowercase , lowercase )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_a = ddp_input[torch.randperm(len(lowercase ) )]
def _lowerCamelCase ( lowercase : Tuple ) -> Tuple:
# Test on distributed setup that context manager behaves properly
_a , _a , _a = get_training_setup(lowercase )
# Use a single batch
_a , _a = next(iter(lowercase ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
else:
# Sync grads
step_model(lowercase , lowercase , lowercase , lowercase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_a = ddp_input[torch.randperm(len(lowercase ) )]
def _lowerCamelCase ( lowercase : List[Any]=False , lowercase : Optional[int]=False ) -> Any:
_a = Accelerator(
split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_a , _a , _a = get_training_setup(lowercase )
for iteration, batch in enumerate(lowercase ):
_a , _a = batch.values()
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowercase , lowercase , lowercase , lowercase , lowercase )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowercase ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
_a = ddp_input[torch.randperm(len(lowercase ) )]
GradientState._reset_state()
def _lowerCamelCase ( lowercase : int=False , lowercase : int=False ) -> Dict:
_a = Accelerator(
split_batches=lowercase , dispatch_batches=lowercase , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
_a , _a , _a , _a , _a , _a , _a = get_training_setup(lowercase , lowercase )
for iteration, batch in enumerate(lowercase ):
_a , _a = batch.values()
# Gather the distributed inputs and targs for the base model
_a , _a = accelerator.gather((ddp_input, ddp_target) )
_a , _a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowercase , lowercase , lowercase , lowercase , lowercase )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowercase )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowercase ):
step_model(lowercase , lowercase , lowercase , lowercase )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
_a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowercase ))
if accelerator.num_processes > 1:
check_model_parameters(lowercase , lowercase , lowercase , lowercase )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def _lowerCamelCase ( ) -> Any:
_a = Accelerator()
_a = RegressionDataset(length=80 )
_a = DataLoader(lowercase , batch_size=16 )
_a = RegressionDataset(length=96 )
_a = DataLoader(lowercase , batch_size=16 )
_a , _a = accelerator.prepare(lowercase , lowercase )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowercase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase )
if iteration < len(lowercase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowercase ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowercase )
if batch_num < len(lowercase ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _lowerCamelCase ( ) -> Optional[Any]:
_a = Accelerator()
_a = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(lowercase )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(lowercase )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(lowercase , lowercase )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(lowercase , lowercase )
def _lowerCamelCase ( lowercase : Any ) -> Tuple:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 63 | 0 |
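What those tests verify, minus Accelerate, in a plain PyTorch sketch: gradients accumulate across micro-batches, the loss is scaled by the accumulation factor so the result matches one large batch, and the optimizer only steps every `accum_steps` batches (model and batch sizes here are arbitrary).

# Manual gradient accumulation in plain PyTorch.
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
accum_steps = 2
data = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(6)]

for step, (x, y) in enumerate(data):
    loss = torch.nn.functional.mse_loss(model(x), y) / accum_steps
    loss.backward()  # grads add up across micro-batches
    if (step + 1) % accum_steps == 0:
        optimizer.step()
        optimizer.zero_grad()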
from collections.abc import Generator
from math import sin
def to_little_endian(string_aa: bytes) -> bytes:
    '''simple docstring'''
    if len(string_aa) != 32:
        raise ValueError("Input must be of length 32")
    little_endian = b""
    for i in [3, 2, 1, 0]:
        little_endian += string_aa[8 * i : 8 * i + 8]
    return little_endian


def reformat_hex(i: int) -> bytes:
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative")
    hex_rep = format(i, "08x")[-8:]
    little_endian_hex = b""
    for i in [3, 2, 1, 0]:
        little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("utf-8")
    return little_endian_hex


def preprocess(message: bytes) -> bytes:
    '''simple docstring'''
    bit_string = b""
    for char in message:
        bit_string += format(char, "08b").encode("utf-8")
    start_len = format(len(bit_string), "064b").encode("utf-8")
    # Pad bit_string to a multiple of 512 chars
    bit_string += b"1"
    while len(bit_string) % 512 != 448:
        bit_string += b"0"
    bit_string += to_little_endian(start_len[32:]) + to_little_endian(start_len[:32])
    return bit_string


def get_block_words(bit_string: bytes):
    '''simple docstring'''
    if len(bit_string) % 512 != 0:
        raise ValueError("Input must have length that's a multiple of 512")
    for pos in range(0, len(bit_string), 512):
        block = bit_string[pos : pos + 512]
        block_words = []
        for i in range(0, 512, 32):
            block_words.append(int(to_little_endian(block[i : i + 32]), 2))
        yield block_words


def not_aa(i: int) -> int:
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative")
    i_str = format(i, "032b")
    new_str = ""
    for c in i_str:
        new_str += "1" if c == "0" else "0"
    return int(new_str, 2)


def sum_aa(a: int, b: int) -> int:
    '''simple docstring'''
    return (a + b) % 2**32


def left_rotate_aa(i: int, shift: int) -> int:
    '''simple docstring'''
    if i < 0:
        raise ValueError("Input must be non-negative")
    if shift < 0:
        raise ValueError("Shift must be non-negative")
    return ((i << shift) ^ (i >> (32 - shift))) % 2**32
def md5_me(message: bytes) -> bytes:
    '''simple docstring'''
    bit_string = preprocess(message)
    added_consts = [int(2**32 * abs(sin(i + 1))) for i in range(64)]
    # Starting states
    aa = 0x67452301
    ba = 0xEFCDAB89
    ca = 0x98BADCFE
    da = 0x10325476
    shift_amounts = (
        [7, 12, 17, 22] * 4
        + [5, 9, 14, 20] * 4
        + [4, 11, 16, 23] * 4
        + [6, 10, 15, 21] * 4
    )
    # Process bit string in chunks, each with 16 32-char words
    for block_words in get_block_words(bit_string):
        a = aa
        b = ba
        c = ca
        d = da
        # Hash current chunk
        for i in range(64):
            if i <= 15:
                # f = (b & c) | (not_32(b) & d) # Alternate definition for f
                f = d ^ (b & (c ^ d))
                g = i
            elif i <= 31:
                # f = (d & b) | (not_32(d) & c) # Alternate definition for f
                f = c ^ (d & (b ^ c))
                g = (5 * i + 1) % 16
            elif i <= 47:
                f = b ^ c ^ d
                g = (3 * i + 5) % 16
            else:
                f = c ^ (b | not_aa(d))
                g = (7 * i) % 16
            f = (f + a + added_consts[i] + block_words[g]) % 2**32
            a = d
            d = c
            c = b
            b = sum_aa(b, left_rotate_aa(f, shift_amounts[i]))
        # Add hashed chunk to running total
        aa = sum_aa(aa, a)
        ba = sum_aa(ba, b)
        ca = sum_aa(ca, c)
        da = sum_aa(da, d)
    digest = reformat_hex(aa) + reformat_hex(ba) + reformat_hex(ca) + reformat_hex(da)
    return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 36 |
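A cross-check of the implementation above against the stdlib (uses the `md5_me` name as restored above):

# md5_me returns the lowercase hex digest as bytes, like TheAlgorithms' md5.
import hashlib

message = b"The quick brown fox jumps over the lazy dog"
assert md5_me(message) == hashlib.md5(message).hexdigest().encode("utf-8")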
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __SCREAMING_SNAKE_CASE (PretrainedConfig ):
    """simple docstring"""

    model_type = 'trocr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'num_attention_heads': 'decoder_attention_heads',
        'hidden_size': 'd_model',
        'num_hidden_layers': 'decoder_layers',
    }
def __init__(self, vocab_size=50_265, d_model=1_024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4_096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs):
    self.vocab_size = vocab_size
    self.d_model = d_model
    self.decoder_layers = decoder_layers
    self.decoder_attention_heads = decoder_attention_heads
    self.decoder_ffn_dim = decoder_ffn_dim
    self.activation_function = activation_function
    self.max_position_embeddings = max_position_embeddings
    self.dropout = dropout
    self.attention_dropout = attention_dropout
    self.activation_dropout = activation_dropout
    self.init_std = init_std
    self.decoder_layerdrop = decoder_layerdrop
    self.use_cache = use_cache
    self.scale_embedding = scale_embedding
    self.use_learned_position_embeddings = use_learned_position_embeddings
    self.layernorm_embedding = layernorm_embedding
    super().__init__(
        pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs,
    )
| 63 | 0 |
'''simple docstring'''
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
from .record_evaluation import evaluate as evaluate_record
_lowerCAmelCase = '''\
@article{wang2019superglue,
title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
journal={arXiv preprint arXiv:1905.00537},
year={2019}
}
'''
_lowerCAmelCase = '''\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.
'''
_lowerCAmelCase = '''
Compute SuperGLUE evaluation metric associated to each SuperGLUE dataset.
Args:
predictions: list of predictions to score. Depending on the SuperGlUE subset:
- for \'record\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'prediction_text\': the predicted answer text
- for \'multirc\': list of question-answer dictionaries with the following keys:
- \'idx\': index of the question-answer pair as specified by the dataset
- \'prediction\': the predicted answer label
- otherwise: list of predicted labels
references: list of reference labels. Depending on the SuperGLUE subset:
- for \'record\': list of question-answers dictionaries with the following keys:
- \'idx\': index of the question as specified by the dataset
- \'answers\': list of possible answers
- otherwise: list of reference labels
Returns: depending on the SuperGLUE subset:
- for \'record\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1\': F1 score
- for \'multirc\':
- \'exact_match\': Exact match between answer and gold answer
- \'f1_m\': Per-question macro-F1 score
- \'f1_a\': Average F1 score over all answers
- for \'axb\':
\'matthews_correlation\': Matthew Correlation
- for \'cb\':
- \'accuracy\': Accuracy
- \'f1\': F1 score
- for all others:
- \'accuracy\': Accuracy
Examples:
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'copa\') # any of ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'cb\')
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'record\')
>>> predictions = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'prediction_text\': \'answer\'}]
>>> references = [{\'idx\': {\'passage\': 0, \'query\': 0}, \'answers\': [\'answer\', \'another_answer\']}]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'multirc\')
>>> predictions = [{\'idx\': {\'answer\': 0, \'paragraph\': 0, \'question\': 0}, \'prediction\': 0}, {\'idx\': {\'answer\': 1, \'paragraph\': 2, \'question\': 3}, \'prediction\': 1}]
>>> references = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'exact_match\': 1.0, \'f1_m\': 1.0, \'f1_a\': 1.0}
>>> super_glue_metric = datasets.load_metric(\'super_glue\', \'axb\')
>>> references = [0, 1]
>>> predictions = [0, 1]
>>> results = super_glue_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'matthews_correlation\': 1.0}
'''
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
return float((preds == labels).mean() )
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase , UpperCamelCase="binary" ):
"""simple docstring"""
lowerCAmelCase__ : Any = simple_accuracy(UpperCamelCase , UpperCamelCase )
    lowerCAmelCase__ : Tuple = float(f1_score(y_true=UpperCamelCase , y_pred=UpperCamelCase , average=UpperCamelCase ) )
return {
"accuracy": acc,
"f1": fa,
}
def _SCREAMING_SNAKE_CASE ( UpperCamelCase , UpperCamelCase ):
"""simple docstring"""
lowerCAmelCase__ : List[str] = {}
for id_pred, label in zip(UpperCamelCase , UpperCamelCase ):
lowerCAmelCase__ : str = f"""{id_pred['idx']['paragraph']}-{id_pred['idx']['question']}"""
lowerCAmelCase__ : Dict = id_pred["""prediction"""]
if question_id in question_map:
question_map[question_id].append((pred, label) )
else:
lowerCAmelCase__ : Optional[int] = [(pred, label)]
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for question, preds_labels in question_map.items():
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = zip(*UpperCamelCase )
        lowerCAmelCase__ : List[Any] = f1_score(y_true=UpperCamelCase , y_pred=UpperCamelCase , average="""macro""" )
fas.append(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = int(sum(pred == label for pred, label in preds_labels ) == len(UpperCamelCase ) )
ems.append(UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = float(sum(UpperCamelCase ) / len(UpperCamelCase ) )
lowerCAmelCase__ : List[Any] = sum(UpperCamelCase ) / len(UpperCamelCase )
    lowerCAmelCase__ : Dict = float(f1_score(y_true=UpperCamelCase , y_pred=[id_pred["""prediction"""] for id_pred in ids_preds] ) )
return {"exact_match": em, "f1_m": fa_m, "f1_a": fa_a}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowerCAmelCase_( datasets.Metric ):
'''simple docstring'''
def UpperCAmelCase_ ( self ) -> Optional[Any]:
if self.config_name not in [
"boolq",
"cb",
"copa",
"multirc",
"record",
"rte",
"wic",
"wsc",
"wsc.fixed",
"axb",
"axg",
]:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(self._get_feature_types() ) ,codebase_urls=[] ,reference_urls=[] ,format="""numpy""" if not self.config_name == """record""" and not self.config_name == """multirc""" else None ,)
def UpperCAmelCase_ ( self ) -> str:
if self.config_name == "record":
return {
"predictions": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"prediction_text": datasets.Value("""string""" ),
},
"references": {
"idx": {
"passage": datasets.Value("""int64""" ),
"query": datasets.Value("""int64""" ),
},
"answers": datasets.Sequence(datasets.Value("""string""" ) ),
},
}
elif self.config_name == "multirc":
return {
"predictions": {
"idx": {
"answer": datasets.Value("""int64""" ),
"paragraph": datasets.Value("""int64""" ),
"question": datasets.Value("""int64""" ),
},
"prediction": datasets.Value("""int64""" ),
},
"references": datasets.Value("""int64""" ),
}
else:
return {
"predictions": datasets.Value("""int64""" ),
"references": datasets.Value("""int64""" ),
}
def UpperCAmelCase_ ( self ,__UpperCAmelCase ,__UpperCAmelCase ) -> Any:
if self.config_name == "axb":
return {"matthews_correlation": matthews_corrcoef(__UpperCAmelCase ,__UpperCAmelCase )}
elif self.config_name == "cb":
return acc_and_fa(__UpperCAmelCase ,__UpperCAmelCase ,fa_avg="""macro""" )
elif self.config_name == "record":
lowerCAmelCase__ : Optional[Any] = [
{
"""qas""": [
{"""id""": ref["""idx"""]["""query"""], """answers""": [{"""text""": ans} for ans in ref["""answers"""]]}
for ref in references
]
}
]
lowerCAmelCase__ : Union[str, Any] = {pred["""idx"""]["""query"""]: pred["""prediction_text"""] for pred in predictions}
return evaluate_record(__UpperCAmelCase ,__UpperCAmelCase )[0]
elif self.config_name == "multirc":
return evaluate_multirc(__UpperCAmelCase ,__UpperCAmelCase )
elif self.config_name in ["copa", "rte", "wic", "wsc", "wsc.fixed", "boolq", "axg"]:
return {"accuracy": simple_accuracy(__UpperCAmelCase ,__UpperCAmelCase )}
else:
raise KeyError(
"""You should supply a configuration name selected in """
"""[\"boolq\", \"cb\", \"copa\", \"multirc\", \"record\", \"rte\", \"wic\", \"wsc\", \"wsc.fixed\", \"axb\", \"axg\",]""" )
| 37 |
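A minimal, self-contained sketch of the per-question grouping that the MultiRC scoring above relies on; `multirc_exact_match` and its inputs are illustrative names, not the datasets-library API.

from collections import defaultdict

def multirc_exact_match(ids_preds, labels):
    # Group answer-level predictions by (paragraph, question) before scoring.
    question_map = defaultdict(list)
    for id_pred, label in zip(ids_preds, labels):
        key = (id_pred["idx"]["paragraph"], id_pred["idx"]["question"])
        question_map[key].append((id_pred["prediction"], label))
    # A question counts as an exact match only if every answer is predicted correctly.
    ems = [int(all(pred == lab for pred, lab in pairs)) for pairs in question_map.values()]
    return sum(ems) / len(ems)

preds = [
    {"idx": {"paragraph": 0, "question": 0}, "prediction": 0},
    {"idx": {"paragraph": 0, "question": 0}, "prediction": 1},
]
print(multirc_exact_match(preds, [0, 1]))  # 1.0: both answers for the question match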
'''simple docstring'''
import argparse
import os
import re
lowerCAmelCase_ : Any = 'src/transformers/models/auto'
# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
lowerCAmelCase_ : List[str] = re.compile(R'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
lowerCAmelCase_ : Tuple = re.compile(R'\s*\(\s*"(\S[^"]+)"')
def _lowerCamelCase ( lowercase : Any , lowercase : bool = False ) -> Optional[Any]:
with open(lowercase , "r" , encoding="utf-8" ) as f:
_a = f.read()
_a = content.split("\n" )
_a = []
_a = 0
while line_idx < len(lowercase ):
if _re_intro_mapping.search(lines[line_idx] ) is not None:
_a = len(re.search(r"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
# Start of a new mapping!
while not lines[line_idx].startswith(" " * indent + "(" ):
new_lines.append(lines[line_idx] )
line_idx += 1
_a = []
while lines[line_idx].strip() != "]":
# Blocks either fit in one line or not
if lines[line_idx].strip() == "(":
_a = line_idx
while not lines[line_idx].startswith(" " * indent + ")" ):
line_idx += 1
blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
else:
blocks.append(lines[line_idx] )
line_idx += 1
# Sort blocks by their identifiers
_a = sorted(lowercase , key=lambda lowercase : _re_identifier.search(lowercase ).groups()[0] )
new_lines += blocks
else:
new_lines.append(lines[line_idx] )
line_idx += 1
if overwrite:
with open(lowercase , "w" , encoding="utf-8" ) as f:
f.write("\n".join(lowercase ) )
elif "\n".join(lowercase ) != content:
return True
def _lowerCamelCase ( lowercase : bool = False ) -> List[str]:
_a = [os.path.join(lowercase , lowercase ) for f in os.listdir(lowercase ) if f.endswith(".py" )]
_a = [sort_auto_mapping(lowercase , overwrite=lowercase ) for fname in fnames]
if not overwrite and any(lowercase ):
_a = [f for f, d in zip(lowercase , lowercase ) if d]
raise ValueError(
F'The following files have auto mappings that need sorting: {", ".join(lowercase )}. Run `make style` to fix'
" this." )
if __name__ == "__main__":
lowerCAmelCase_ : Any = argparse.ArgumentParser()
parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
lowerCAmelCase_ : Optional[int] = parser.parse_args()
sort_all_auto_mappings(not args.check_only)
| 63 | 0 |
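A mini-example of the sort key used above: each mapping block is ordered by the first quoted identifier that the `_re_identifier` pattern extracts. The `blocks` list here is made up for illustration.

import re

_re_identifier = re.compile(r'\s*\(\s*"(\S[^"]+)"')

blocks = [
    '        ("roberta", "RobertaConfig"),',
    '        ("albert", "AlbertConfig"),',
    '        ("bert", "BertConfig"),',
]
blocks = sorted(blocks, key=lambda b: _re_identifier.search(b).groups()[0])
print(blocks[0].strip())  # ("albert", "AlbertConfig"), sorts first alphabetically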
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
UpperCAmelCase_ : Union[str, Any] = {
'''configuration_nezha''': ['''NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''NezhaConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase_ : List[Any] = [
'''NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''NezhaForNextSentencePrediction''',
'''NezhaForMaskedLM''',
'''NezhaForPreTraining''',
'''NezhaForMultipleChoice''',
'''NezhaForQuestionAnswering''',
'''NezhaForSequenceClassification''',
'''NezhaForTokenClassification''',
'''NezhaModel''',
'''NezhaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_nezha import NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP, NezhaConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nezha import (
NEZHA_PRETRAINED_MODEL_ARCHIVE_LIST,
NezhaForMaskedLM,
NezhaForMultipleChoice,
NezhaForNextSentencePrediction,
NezhaForPreTraining,
NezhaForQuestionAnswering,
NezhaForSequenceClassification,
NezhaForTokenClassification,
NezhaModel,
NezhaPreTrainedModel,
)
else:
import sys
UpperCAmelCase_ : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 38 |
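The `_LazyModule` above defers heavy submodule imports until an attribute is first accessed. A rough standalone sketch of the same idea using PEP 562 module-level `__getattr__` (this is not the transformers implementation, and `_import_structure` here holds placeholder contents); it would live in a package's `__init__.py`:

import importlib

_import_structure = {"json": ["dumps", "loads"]}  # submodule -> exported names

def __getattr__(name):  # consulted only for attributes not found normally
    for module_name, symbols in _import_structure.items():
        if name in symbols:
            return getattr(importlib.import_module(module_name), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")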
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCAmelCase_ : int = logging.get_logger(__name__)
lowerCAmelCase_ : Tuple = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='big_bird'
def __init__( self : Optional[int] , __a : Dict=5_03_58 , __a : str=7_68 , __a : List[Any]=12 , __a : List[str]=12 , __a : Union[str, Any]=30_72 , __a : str="gelu_new" , __a : Dict=0.1 , __a : Union[str, Any]=0.1 , __a : Any=40_96 , __a : int=2 , __a : Tuple=0.02 , __a : List[Any]=1e-1_2 , __a : int=True , __a : List[str]=0 , __a : Tuple=1 , __a : Optional[Any]=2 , __a : Tuple=66 , __a : str="block_sparse" , __a : Tuple=True , __a : Optional[int]=False , __a : str=64 , __a : Tuple=3 , __a : Any=None , **__a : Dict , ):
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , sep_token_id=__a , **__a , )
_a = vocab_size
_a = max_position_embeddings
_a = hidden_size
_a = num_hidden_layers
_a = num_attention_heads
_a = intermediate_size
_a = hidden_act
_a = hidden_dropout_prob
_a = attention_probs_dropout_prob
_a = initializer_range
_a = type_vocab_size
_a = layer_norm_eps
_a = use_cache
_a = rescale_embeddings
_a = attention_type
_a = use_bias
_a = block_size
_a = num_random_blocks
_a = classifier_dropout
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
@property
def UpperCamelCase__ ( self : Optional[int] ):
if self.task == "multiple-choice":
_a = {0: "batch", 1: "choice", 2: "sequence"}
else:
_a = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 63 | 0 |
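For reference, the OnnxConfig property above only switches the dynamic-axes mapping on the task; a plain-Python restatement of that logic (illustrative, not the library code):

from collections import OrderedDict

def onnx_inputs(task):
    if task == "multiple-choice":
        dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
    else:
        dynamic_axis = {0: "batch", 1: "sequence"}
    return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)])

print(onnx_inputs("default")["input_ids"])  # {0: 'batch', 1: 'sequence'}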
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_a = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class __lowerCamelCase ( unittest.TestCase):
"""simple docstring"""
UpperCamelCase__ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
UpperCamelCase__ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
UpperCamelCase__ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
UpperCamelCase__ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = ZeroShotClassificationPipeline(
            model=UpperCAmelCase , tokenizer=UpperCAmelCase , candidate_labels=['politics', 'health'] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def UpperCamelCase ( self , UpperCAmelCase , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , candidate_labels='politics' )
self.assertEqual(UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase )]} )
# No kwarg
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , ['politics'] )
self.assertEqual(UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase )]} )
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , candidate_labels=['politics'] )
self.assertEqual(UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase )]} )
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , candidate_labels='politics, public health' )
self.assertEqual(
UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
_UpperCAmelCase = classifier('Who are you voting for in 2020?' , candidate_labels=['politics', 'public health'] )
self.assertEqual(
UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs['scores'] ) ) , 1.0 )
_UpperCAmelCase = classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='This text is about {}' )
self.assertEqual(UpperCAmelCase , {'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
_UpperCAmelCase = classifier(['I am happy'] , ['positive', 'negative'] )
self.assertEqual(
UpperCAmelCase , [
{'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )]}
for i in range(1 )
] , )
_UpperCAmelCase = classifier(['I am happy', 'I am sad'] , ['positive', 'negative'] )
self.assertEqual(
UpperCAmelCase , [
{'sequence': ANY(UpperCAmelCase ), 'labels': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )], 'scores': [ANY(UpperCAmelCase ), ANY(UpperCAmelCase )]}
for i in range(2 )
] , )
with self.assertRaises(UpperCAmelCase ):
classifier('' , candidate_labels='politics' )
with self.assertRaises(UpperCAmelCase ):
classifier(UpperCAmelCase , candidate_labels='politics' )
with self.assertRaises(UpperCAmelCase ):
classifier('Who are you voting for in 2020?' , candidate_labels='' )
with self.assertRaises(UpperCAmelCase ):
classifier('Who are you voting for in 2020?' , candidate_labels=UpperCAmelCase )
with self.assertRaises(UpperCAmelCase ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template='Not formatting template' , )
with self.assertRaises(UpperCAmelCase ):
classifier(
'Who are you voting for in 2020?' , candidate_labels='politics' , hypothesis_template=UpperCAmelCase , )
self.run_entailment_id(UpperCAmelCase )
def UpperCamelCase ( self , UpperCAmelCase ):
"""simple docstring"""
_UpperCAmelCase = zero_shot_classifier.model.config
_UpperCAmelCase = config.labelaid
_UpperCAmelCase = zero_shot_classifier.entailment_id
_UpperCAmelCase = {'LABEL_0': 0, 'LABEL_1': 1, 'LABEL_2': 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
_UpperCAmelCase = {'entailment': 0, 'neutral': 1, 'contradiction': 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
_UpperCAmelCase = {'ENTAIL': 0, 'NON-ENTAIL': 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
_UpperCAmelCase = {'ENTAIL': 2, 'NEUTRAL': 1, 'CONTR': 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
_UpperCAmelCase = original_labelaid
self.assertEqual(UpperCAmelCase , zero_shot_classifier.entailment_id )
@require_torch
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
'Who are you voting for in 2020?' * 100 , candidate_labels=['politics', 'public health', 'science'] )
@require_torch
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='pt' , )
_UpperCAmelCase = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
@require_tf
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline(
'zero-shot-classification' , model='sshleifer/tiny-distilbert-base-cased-distilled-squad' , framework='tf' , )
_UpperCAmelCase = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['science', 'public health', 'politics'],
'scores': [0.3_33, 0.3_33, 0.3_33],
} , )
@slow
@require_torch
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='pt' )
_UpperCAmelCase = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
_UpperCAmelCase = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=UpperCAmelCase , )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
@slow
@require_tf
def UpperCamelCase ( self ):
"""simple docstring"""
_UpperCAmelCase = pipeline('zero-shot-classification' , model='roberta-large-mnli' , framework='tf' )
_UpperCAmelCase = zero_shot_classifier(
'Who are you voting for in 2020?' , candidate_labels=['politics', 'public health', 'science'] )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': 'Who are you voting for in 2020?',
'labels': ['politics', 'public health', 'science'],
'scores': [0.9_76, 0.0_15, 0.0_09],
} , )
_UpperCAmelCase = zero_shot_classifier(
'The dominant sequence transduction models are based on complex recurrent or convolutional neural networks'
' in an encoder-decoder configuration. The best performing models also connect the encoder and decoder'
' through an attention mechanism. We propose a new simple network architecture, the Transformer, based'
' solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two'
' machine translation tasks show these models to be superior in quality while being more parallelizable'
' and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014'
' English-to-German translation task, improving over the existing best results, including ensembles by'
' over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new'
' single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small'
' fraction of the training costs of the best models from the literature. We show that the Transformer'
' generalizes well to other tasks by applying it successfully to English constituency parsing both with'
' large and limited training data.' , candidate_labels=['machine learning', 'statistics', 'translation', 'vision'] , multi_label=UpperCAmelCase , )
self.assertEqual(
nested_simplify(UpperCAmelCase ) , {
'sequence': (
'The dominant sequence transduction models are based on complex recurrent or convolutional neural'
' networks in an encoder-decoder configuration. The best performing models also connect the'
' encoder and decoder through an attention mechanism. We propose a new simple network'
' architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence'
' and convolutions entirely. Experiments on two machine translation tasks show these models to be'
' superior in quality while being more parallelizable and requiring significantly less time to'
' train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,'
' improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014'
' English-to-French translation task, our model establishes a new single-model state-of-the-art'
' BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training'
' costs of the best models from the literature. We show that the Transformer generalizes well to'
' other tasks by applying it successfully to English constituency parsing both with large and'
' limited training data.'
),
'labels': ['translation', 'machine learning', 'vision', 'statistics'],
'scores': [0.8_17, 0.7_13, 0.0_18, 0.0_18],
} , )
| 39 |
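The pipeline tested above casts zero-shot classification as NLI: each candidate label is wrapped in a hypothesis template and scored by the model's entailment logit. A minimal sketch of the single-label scoring step, assuming entailment is logit index 0 and using made-up logits:

import math

def zero_shot_scores(nli_logits, entailment_id=0):
    # Softmax over the entailment logits of all candidate labels.
    ent = [logits[entailment_id] for logits in nli_logits]
    z = sum(math.exp(e) for e in ent)
    return [math.exp(e) / z for e in ent]

scores = zero_shot_scores([[2.0, 0.1, -1.0], [0.5, 0.2, 0.0]])
print(scores, sum(scores))  # two label scores that sum to 1.0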
'''simple docstring'''
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ , lowerCamelCase_ ):
"""simple docstring"""
@register_to_config
def __init__( self : Dict , *,
__a : int = 4 , __a : int = 7_68 , __a : int , __a : int , ):
super().__init__()
_a = nn.Parameter(torch.zeros(__a ) )
# parameters for additional clip time embeddings
_a = nn.Linear(__a , __a )
_a = nn.Linear(__a , __a )
# parameters for encoder hidden states
_a = clip_extra_context_tokens
_a = nn.Linear(
__a , self.clip_extra_context_tokens * cross_attention_dim )
_a = nn.Linear(__a , __a )
_a = nn.LayerNorm(__a )
def UpperCamelCase__ ( self : Optional[Any] , *, __a : Tuple , __a : Union[str, Any] , __a : Any , __a : List[Any] ):
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
_a = image_embeddings.shape[0]
_a = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
_a = classifier_free_guidance_embeddings.expand(
__a , -1 )
_a = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
_a = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
_a = self.embedding_proj(__a )
_a = self.clip_image_embeddings_project_to_time_embeddings(__a )
_a = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
_a = self.clip_extra_context_tokens_proj(__a )
_a = clip_extra_context_tokens.reshape(__a , -1 , self.clip_extra_context_tokens )
_a = clip_extra_context_tokens.permute(0 , 2 , 1 )
_a = self.encoder_hidden_states_proj(__a )
_a = self.text_encoder_hidden_states_norm(__a )
_a = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
| 63 | 0 |
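A shape-level sketch of the classifier-free-guidance step in the text-projection model above: the learned unconditional embedding is expanded to the batch size and concatenated in front of the image embeddings (dimensions here are illustrative):

import torch

image_embeddings = torch.randn(2, 768)  # conditional half of the batch
learned_uncond = torch.zeros(768)       # stands in for the learned parameter

uncond = learned_uncond.unsqueeze(0).expand(image_embeddings.shape[0], -1)
image_embeddings = torch.cat([uncond, image_embeddings], dim=0)
print(image_embeddings.shape)  # torch.Size([4, 768]): unconditional + conditional halves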
"""simple docstring"""
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class _A ( unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase : str = MODEL_FOR_MASKED_LM_MAPPING
UpperCAmelCase : Any = TF_MODEL_FOR_MASKED_LM_MAPPING
def __snake_case ( self : Tuple):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def __snake_case ( self : Optional[int]):
a : Any = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf")
a : Any = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6) , [
{"sequence": "My name is grouped", "score": 2.1e-05, "token": 38015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-05, "token": 25506, "token_str": " accuser"},
] , )
a : Any = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-05,
"token": 38015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-05,
"token": 25506,
"token_str": " accuser",
},
] , )
a : Dict = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6) , [
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-05, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __snake_case ( self : Optional[int]):
a : str = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt")
a : Optional[Any] = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6) , [
{"sequence": "My name is Maul", "score": 2.2e-05, "token": 35676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
a : Dict = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16416, "token_str": "ELS"},
] , )
a : Optional[Any] = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6) , [
{"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-05, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-05, "token": 13606, "token_str": " Clara"},
] , )
a : Optional[int] = unmasker("My name is <mask> <mask>" , top_k=2)
self.assertEqual(
nested_simplify(__UpperCAmelCase , decimals=6) , [
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-05,
"token": 35676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-05, "token": 16416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def __snake_case ( self : int):
a : int = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt")
# convert model to fp16
pipe.model.half()
a : Tuple = pipe("Paris is the [MASK] of France.")
        # We don't actually care about the result; we just want to make sure
        # the call works, i.e. that the float16 tensor got cast back to float32
        # for postprocessing.
self.assertIsInstance(__UpperCAmelCase , __UpperCAmelCase)
@slow
@require_torch
def __snake_case ( self : Dict):
a : Any = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt")
self.run_large_test(__UpperCAmelCase)
@slow
@require_tf
def __snake_case ( self : Optional[int]):
a : List[Any] = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf")
self.run_large_test(__UpperCAmelCase)
def __snake_case ( self : int , __UpperCAmelCase : Optional[int]):
a : Tuple = unmasker("My name is <mask>")
self.assertEqual(
nested_simplify(__UpperCAmelCase) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
a : List[str] = unmasker("The largest city in France is <mask>")
self.assertEqual(
nested_simplify(__UpperCAmelCase) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 12790,
"token_str": " Lyon",
},
] , )
a : int = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3)
self.assertEqual(
nested_simplify(__UpperCAmelCase) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 13606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def __snake_case ( self : Union[str, Any]):
a : Any = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt")
a : Dict = None
a : str = None
self.run_pipeline_test(__UpperCAmelCase , [])
@require_tf
def __snake_case ( self : Tuple):
a : Optional[int] = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf")
a : Tuple = None
a : Optional[Any] = None
self.run_pipeline_test(__UpperCAmelCase , [])
def __snake_case ( self : Dict , __UpperCAmelCase : Tuple , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any]):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")
a : Optional[Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase)
a : Dict = [
f'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def __snake_case ( self : Optional[int] , __UpperCAmelCase : Tuple , __UpperCAmelCase : Optional[Any]):
a : Union[str, Any] = fill_masker.tokenizer
a : Union[str, Any] = fill_masker.model
a : Union[str, Any] = fill_masker(
f'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
__UpperCAmelCase , [
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
] , )
a : List[Any] = fill_masker([f'''This is a {tokenizer.mask_token}'''])
self.assertEqual(
__UpperCAmelCase , [
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
] , )
a : List[str] = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''])
self.assertEqual(
__UpperCAmelCase , [
[
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
],
[
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
],
] , )
with self.assertRaises(__UpperCAmelCase):
fill_masker([None])
        # Inputs that contain no mask_token are not supported
with self.assertRaises(__UpperCAmelCase):
fill_masker("This is")
self.run_test_top_k(__UpperCAmelCase , __UpperCAmelCase)
self.run_test_targets(__UpperCAmelCase , __UpperCAmelCase)
self.run_test_top_k_targets(__UpperCAmelCase , __UpperCAmelCase)
self.fill_mask_with_duplicate_targets_and_top_k(__UpperCAmelCase , __UpperCAmelCase)
self.fill_mask_with_multiple_masks(__UpperCAmelCase , __UpperCAmelCase)
def __snake_case ( self : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : int):
a : str = tokenizer.get_vocab()
a : Any = sorted(vocab.keys())[:2]
# Pipeline argument
a : List[str] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , targets=__UpperCAmelCase)
a : Optional[int] = fill_masker(f'''This is a {tokenizer.mask_token}''')
self.assertEqual(
__UpperCAmelCase , [
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
] , )
a : int = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , __UpperCAmelCase)
a : Tuple = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(__UpperCAmelCase))
# Call argument
a : Union[str, Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase)
a : str = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=__UpperCAmelCase)
self.assertEqual(
__UpperCAmelCase , [
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
] , )
a : Any = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , __UpperCAmelCase)
a : List[Any] = [tokenizer.decode([x]) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(__UpperCAmelCase))
# Score equivalence
a : Any = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=__UpperCAmelCase)
a : Optional[Any] = [top_mask["token_str"] for top_mask in outputs]
a : List[Any] = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__UpperCAmelCase) == set(__UpperCAmelCase):
a : int = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=__UpperCAmelCase)
a : List[str] = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__UpperCAmelCase) , nested_simplify(__UpperCAmelCase))
# Raises with invalid
with self.assertRaises(__UpperCAmelCase):
a : int = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[])
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__UpperCAmelCase):
a : List[Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[""])
with self.assertRaises(__UpperCAmelCase):
a : Union[str, Any] = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets="")
def __snake_case ( self : int , __UpperCAmelCase : Optional[Any] , __UpperCAmelCase : str):
a : List[str] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase , top_k=2)
a : str = fill_masker(f'''This is a {tokenizer.mask_token}''')
self.assertEqual(
__UpperCAmelCase , [
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
] , )
a : Union[str, Any] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase)
a : Optional[int] = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2)
self.assertEqual(
__UpperCAmelCase , [
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
] , )
self.assertEqual(nested_simplify(__UpperCAmelCase) , nested_simplify(__UpperCAmelCase))
def __snake_case ( self : Any , __UpperCAmelCase : Optional[int] , __UpperCAmelCase : str):
a : Tuple = tokenizer.get_vocab()
a : List[str] = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase)
# top_k=2, ntargets=3
a : Dict = sorted(vocab.keys())[:3]
a : int = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=__UpperCAmelCase)
        # If we use the most probable targets, and filter differently, we should still
        # have the same results
a : Union[str, Any] = [el["token_str"] for el in sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase: x["score"] , reverse=__UpperCAmelCase)]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__UpperCAmelCase).issubset(__UpperCAmelCase):
a : Tuple = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=__UpperCAmelCase)
# They should yield exactly the same result
self.assertEqual(nested_simplify(__UpperCAmelCase) , nested_simplify(__UpperCAmelCase))
def __snake_case ( self : int , __UpperCAmelCase : Any , __UpperCAmelCase : Any):
a : str = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase)
a : str = tokenizer.get_vocab()
# String duplicates + id duplicates
a : List[Any] = sorted(vocab.keys())[:3]
a : Dict = [targets[0], targets[1], targets[0], targets[2], targets[1]]
a : Union[str, Any] = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=__UpperCAmelCase , top_k=10)
        # The target list contains duplicates, so we can't output more
        # results than there are unique targets
self.assertEqual(len(__UpperCAmelCase) , 3)
def __snake_case ( self : Optional[int] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : List[Any]):
a : Dict = FillMaskPipeline(model=__UpperCAmelCase , tokenizer=__UpperCAmelCase)
a : Tuple = fill_masker(
f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2)
self.assertEqual(
__UpperCAmelCase , [
[
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
],
[
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
],
[
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
{"sequence": ANY(__UpperCAmelCase), "score": ANY(__UpperCAmelCase), "token": ANY(__UpperCAmelCase), "token_str": ANY(__UpperCAmelCase)},
],
] , )
| 40 |
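Underneath the fill-mask pipeline exercised above is a simple postprocessing step: softmax the vocabulary logits at the masked position and keep the `top_k` token ids. A rough numpy sketch (illustrative, not the pipeline's actual code):

import numpy as np

def top_k_fill(logits_at_mask, top_k=2):
    probs = np.exp(logits_at_mask - logits_at_mask.max())  # numerically stable softmax
    probs /= probs.sum()
    top = np.argsort(-probs)[:top_k]
    return [{"token": int(t), "score": float(probs[t])} for t in top]

print(top_k_fill(np.array([0.1, 2.0, -1.0, 0.5])))  # token 1 first, then token 3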
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _lowerCamelCase ( lowercase : Dict ) -> Any:
_a = filter(lambda lowercase : p.requires_grad , model.parameters() )
_a = sum([np.prod(p.size() ) for p in model_parameters] )
return params
lowerCAmelCase_ : int = logging.getLogger(__name__)
def _lowerCamelCase ( lowercase : List[Any] , lowercase : Any ) -> Any:
if metric == "rouge2":
_a = "{val_avg_rouge2:.4f}-{step_count}"
elif metric == "bleu":
_a = "{val_avg_bleu:.4f}-{step_count}"
elif metric == "em":
_a = "{val_avg_em:.4f}-{step_count}"
else:
raise NotImplementedError(
            F'seq2seq callbacks only support rouge2 and bleu, got {metric}. You can make your own by adding to this'
" function." )
_a = ModelCheckpoint(
dirpath=lowercase , filename=lowercase , monitor=F'val_{metric}' , mode="max" , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def _lowerCamelCase ( lowercase : Optional[int] , lowercase : Optional[int] ) -> Union[str, Any]:
return EarlyStopping(
monitor=F'val_{metric}' , mode="min" if "loss" in metric else "max" , patience=lowercase , verbose=lowercase , )
class __SCREAMING_SNAKE_CASE (pl.Callback ):
"""simple docstring"""
def UpperCamelCase__ ( self : Optional[int] , __a : str , __a : List[Any] ):
_a = {f'lr_group_{i}': param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(__a )
@rank_zero_only
def UpperCamelCase__ ( self : Optional[int] , __a : pl.Trainer , __a : pl.LightningModule , __a : str , __a : Tuple=True ):
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
_a = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]} )
# Log results
_a = Path(pl_module.hparams.output_dir )
if type_path == "test":
_a = od / "test_results.txt"
_a = od / "test_generations.txt"
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_a = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
_a = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=__a )
generations_file.parent.mkdir(exist_ok=__a )
with open(__a , "a+" ) as writer:
for key in sorted(__a ):
if key in ["log", "progress_bar", "preds"]:
continue
_a = metrics[key]
if isinstance(__a , torch.Tensor ):
_a = val.item()
_a = f'{key}: {val:.6f}\n'
writer.write(__a )
if not save_generations:
return
if "preds" in metrics:
_a = "\n".join(metrics["preds"] )
generations_file.open("w+" ).write(__a )
@rank_zero_only
def UpperCamelCase__ ( self : int , __a : List[Any] , __a : Union[str, Any] ):
try:
_a = pl_module.model.model.num_parameters()
except AttributeError:
_a = pl_module.model.num_parameters()
_a = count_trainable_parameters(__a )
# mp stands for million parameters
trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6} )
@rank_zero_only
def UpperCamelCase__ ( self : Union[str, Any] , __a : pl.Trainer , __a : pl.LightningModule ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(__a , __a , "test" )
@rank_zero_only
def UpperCamelCase__ ( self : Any , __a : pl.Trainer , __a : int ):
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 63 | 0 |
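The checkpoint callback above just picks a filename template per metric; pytorch_lightning later fills in the `{...}` placeholders from logged metrics. A quick illustration with a made-up metric value:

templates = {
    "rouge2": "{val_avg_rouge2:.4f}-{step_count}",
    "bleu": "{val_avg_bleu:.4f}-{step_count}",
    "em": "{val_avg_em:.4f}-{step_count}",
}
print(templates["bleu"].format(val_avg_bleu=27.4321, step_count=1000))  # 27.4321-1000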
'''simple docstring'''
from transformers import BertTokenizerFast
from .custom_tokenization import CustomTokenizer
class _lowercase ( _lowercase ):
a = CustomTokenizer
pass
| 41 |
'''simple docstring'''
import math
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
def UpperCamelCase__ ( self : List[str] , __a : list[list[float]] , __a : list[int] ):
_a = 0.0
_a = 0.0
for i in range(len(__a ) ):
            da += math.pow((sample[i] - weights[0][i]) , 2 )
            db += math.pow((sample[i] - weights[1][i]) , 2 )
        # the winner is the weight vector closest to the sample
        return 0 if da <= db else 1
def UpperCamelCase__ ( self : List[Any] , __a : list[list[int | float]] , __a : list[int] , __a : int , __a : float ):
for i in range(len(__a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def _lowerCamelCase ( ) -> None:
# Training Examples ( m, n )
_a = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
_a = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
_a = SelfOrganizingMap()
_a = 3
_a = 0.5
for _ in range(lowercase ):
for j in range(len(lowercase ) ):
# training sample
_a = training_samples[j]
# Compute the winning vector
_a = self_organizing_map.get_winner(lowercase , lowercase )
# Update the winning vector
_a = self_organizing_map.update(lowercase , lowercase , lowercase , lowercase )
# classify test sample
_a = [0, 0, 0, 1]
_a = self_organizing_map.get_winner(lowercase , lowercase )
# results
print(F'Clusters that the test sample belongs to : {winner}' )
print(F'Weights that have been trained : {weights}' )
# running the main() function
if __name__ == "__main__":
main()
| 63 | 0 |
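A worked example for the winner computation above, using the test sample and initial weights from `main()`. A SOM assigns a sample to its nearest weight vector, so the squared Euclidean distances decide the cluster (this snippet restates that rule directly):

sample = [0, 0, 0, 1]
weights = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
dists = [sum((s - w) ** 2 for s, w in zip(sample, wv)) for wv in weights]
print(dists)                               # ~[0.66, 1.78]
print("winner:", dists.index(min(dists)))  # cluster 0 is closer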
'''simple docstring'''
lowercase : Union[str, Any] = "Input must be a string of 8 numbers plus letter"
lowercase : str = "TRWAGMYFPDXBNJZSQVHLCKE"
def SCREAMING_SNAKE_CASE__ ( __A ) -> bool:
if not isinstance(__A , __A ):
_snake_case = F'Expected string as input, found {type(__A ).__name__}'
raise TypeError(__A )
_snake_case = spanish_id.replace('-' , '' ).upper()
if len(__A ) != 9:
raise ValueError(__A )
try:
_snake_case = int(spanish_id_clean[0:8] )
_snake_case = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__A ) from ex
if letter.isdigit():
raise ValueError(__A )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
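Worked example of the checksum: the control letter is the eight-digit number modulo 23, used as an index into LOOKUP_LETTERS.

print(12345678 % 23)                  # 14
print("TRWAGMYFPDXBNJZSQVHLCKE"[14])  # Z, so "12345678Z" is a valid DNI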
'''simple docstring'''
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a =['image_processor', 'tokenizer']
__a ='OwlViTImageProcessor'
__a =('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : List[Any] , __a : str=None , __a : List[str]=None , **__a : List[Any] ):
_a = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __a , )
_a = kwargs.pop("feature_extractor" )
_a = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__a , __a )
def __call__( self : Union[str, Any] , __a : Any=None , __a : List[str]=None , __a : int=None , __a : Optional[int]="max_length" , __a : List[str]="np" , **__a : Any ):
if text is None and query_images is None and images is None:
raise ValueError(
"You have to specify at least one text or query image or image. All three cannot be none." )
if text is not None:
if isinstance(__a , __a ) or (isinstance(__a , __a ) and not isinstance(text[0] , __a )):
_a = [self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )]
elif isinstance(__a , __a ) and isinstance(text[0] , __a ):
_a = []
# Maximum number of queries across batch
_a = max([len(__a ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__a ) != max_num_queries:
_a = t + [" "] * (max_num_queries - len(__a ))
_a = self.tokenizer(__a , padding=__a , return_tensors=__a , **__a )
encodings.append(__a )
else:
raise TypeError("Input text should be a string, a list of strings or a nested list of strings" )
if return_tensors == "np":
_a = np.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_a = np.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_a = jnp.concatenate([encoding["input_ids"] for encoding in encodings] , axis=0 )
_a = jnp.concatenate([encoding["attention_mask"] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_a = torch.cat([encoding["input_ids"] for encoding in encodings] , dim=0 )
_a = torch.cat([encoding["attention_mask"] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_a = tf.stack([encoding["input_ids"] for encoding in encodings] , axis=0 )
_a = tf.stack([encoding["attention_mask"] for encoding in encodings] , axis=0 )
else:
raise ValueError("Target return tensor type could not be returned" )
_a = BatchEncoding()
_a = input_ids
_a = attention_mask
if query_images is not None:
_a = BatchEncoding()
_a = self.image_processor(
__a , return_tensors=__a , **__a ).pixel_values
_a = query_pixel_values
if images is not None:
_a = self.image_processor(__a , return_tensors=__a , **__a )
if text is not None and images is not None:
_a = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_a = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__a ) , tensor_type=__a )
def UpperCamelCase__ ( self : List[str] , *__a : Union[str, Any] , **__a : int ):
return self.image_processor.post_process(*__a , **__a )
def UpperCamelCase__ ( self : Optional[int] , *__a : Optional[Any] , **__a : List[str] ):
return self.image_processor.post_process_object_detection(*__a , **__a )
def UpperCamelCase__ ( self : Optional[Any] , *__a : Dict , **__a : Union[str, Any] ):
return self.image_processor.post_process_image_guided_detection(*__a , **__a )
def UpperCamelCase__ ( self : str , *__a : Tuple , **__a : Tuple ):
return self.tokenizer.batch_decode(*__a , **__a )
def UpperCamelCase__ ( self : List[str] , *__a : List[Any] , **__a : Optional[int] ):
return self.tokenizer.decode(*__a , **__a )
@property
def UpperCamelCase__ ( self : List[str] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __a , )
return self.image_processor_class
@property
def UpperCamelCase__ ( self : str ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __a , )
return self.image_processor
| 63 | 0 |
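The `__call__` above pads every sample's text queries to the longest list in the batch with single-space strings; a standalone restatement of just that step (the inputs are illustrative):

text = [["a photo of a cat"], ["a photo of a dog", "a photo of a person"]]
max_num_queries = max(len(t) for t in text)
padded = [t + [" "] * (max_num_queries - len(t)) for t in text]
print(padded)  # [['a photo of a cat', ' '], ['a photo of a dog', 'a photo of a person']]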
def lowerCamelCase ( SCREAMING_SNAKE_CASE ):
'''simple docstring'''
try:
__UpperCamelCase :int = float(SCREAMING_SNAKE_CASE )
except ValueError:
raise ValueError('''Please enter a valid number''' )
__UpperCamelCase :int = decimal - int(SCREAMING_SNAKE_CASE )
if fractional_part == 0:
return int(SCREAMING_SNAKE_CASE ), 1
else:
__UpperCamelCase :Union[str, Any] = len(str(SCREAMING_SNAKE_CASE ).split('''.''' )[1] )
__UpperCamelCase :List[Any] = int(decimal * (10**number_of_frac_digits) )
__UpperCamelCase :Dict = 10**number_of_frac_digits
__UpperCamelCase , __UpperCamelCase :Optional[int] = denominator, numerator
while True:
__UpperCamelCase :Optional[Any] = dividend % divisor
if remainder == 0:
break
__UpperCamelCase , __UpperCamelCase :Optional[int] = divisor, remainder
__UpperCamelCase , __UpperCamelCase :Union[str, Any] = numerator / divisor, denominator / divisor
return int(SCREAMING_SNAKE_CASE ), int(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F'{decimal_to_fraction(2) = }')
print(F'{decimal_to_fraction(8_9.0) = }')
print(F'{decimal_to_fraction("67") = }')
print(F'{decimal_to_fraction("45.0") = }')
print(F'{decimal_to_fraction(1.5) = }')
print(F'{decimal_to_fraction("6.25") = }')
print(F'{decimal_to_fraction("78td") = }')
| 43 |
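Tracing the reduction above for 0.75: the fractional part has two digits, so numerator 75 and denominator 100 are divided down by their Euclidean GCD of 25.

numerator, denominator = 75, 100
a, b = numerator, denominator
while b:
    a, b = b, a % b  # Euclid's algorithm; a ends up as gcd(75, 100) == 25
print(numerator // a, denominator // a)  # 3 4, i.e. decimal_to_fraction(0.75) == (3, 4)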
'''simple docstring'''
def _lowerCamelCase ( lowercase : str ) -> list:
if n_term == "":
return []
_a = []
for temp in range(int(lowercase ) ):
series.append(F'1/{temp + 1}' if series else "1" )
return series
if __name__ == "__main__":
lowerCAmelCase_ : Union[str, Any] = input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term))
| 63 | 0 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class __A :
@staticmethod
def __A ( *a__ , **a__ ):
pass
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Image ) -> str:
    _lowerCAmelCase : str = hashlib.md5(image.tobytes() )
return m.hexdigest()[:10]
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Image ) -> Dict:
_lowerCAmelCase : Dict = np.array(_lowerCamelCase )
_lowerCAmelCase : Union[str, Any] = npimg.shape
return {"hash": hashimage(_lowerCamelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests( unittest.TestCase ):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
    def get_test_pipeline( self , model , tokenizer , processor ):
        image_segmenter = MaskGenerationPipeline(model=model , image_processor=processor )
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test( self , mask_generator , examples ):
        pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
    def test_small_model_tf( self ):
pass
@slow
@require_torch
    def test_mask_generation( self ):
        image_segmenter = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
        outputs = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["""masks"""] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0_4_4_4},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0_2_1},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0_1_6_7},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0_1_3_2},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0_0_5_3},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9_9_6_7},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.9_9_3},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9_9_0_9},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9_8_7_9},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9_8_3_4},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9_7_1_6},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9_6_1_2},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9_5_9_9},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9_5_5_2},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9_5_3_2},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9_5_1_6},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9_4_9_9},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9_4_8_3},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9_4_6_4},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.9_4_3},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.9_4_3},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9_4_0_8},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9_3_3_5},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9_3_2_6},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9_2_6_2},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8_9_9_9},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8_9_8_6},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8_9_8_4},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8_8_7_3},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold( self ):
        model_id = """facebook/sam-vit-huge"""
        image_segmenter = pipeline("""mask-generation""" , model=model_id )
        outputs = image_segmenter(
            """http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 )
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["""masks"""] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0_4_4_4},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0_2_1_0},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0_1_6_7},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0_1_3_2},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0_0_5_3},
] , )
| 44 |
'''simple docstring'''
import argparse
import logging
import os
import re
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
DataCollatorForLanguageModeling,
PushToHubCallback,
TFAutoModelForMaskedLM,
create_optimizer,
)
logger = logging.getLogger(__name__)
AUTO = tf.data.AUTOTUNE
def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Train a masked language model on TPU." )
    parser.add_argument(
        "--pretrained_model_config" , type=str , default="roberta-base" , help="The model config to use. Note that we don't copy the model's weights, only the config!" , )
    parser.add_argument(
        "--tokenizer" , type=str , default="unigram-tokenizer-wikitext" , help="The name of the tokenizer to load. We use the pretrained tokenizer to initialize the model's vocab size." , )
    parser.add_argument(
        "--per_replica_batch_size" , type=int , default=8 , help="Batch size per TPU core." , )
    parser.add_argument(
        "--no_tpu" , action="store_true" , help="If set, run on CPU and don't try to initialize a TPU. Useful for debugging on non-TPU instances." , )
    parser.add_argument(
        "--tpu_name" , type=str , help="Name of TPU resource to initialize. Should be blank on Colab, and 'local' on TPU VMs." , default="local" , )
    parser.add_argument(
        "--tpu_zone" , type=str , help="Google cloud zone that TPU resource is located in. Only used for non-Colab TPU nodes." , )
    parser.add_argument(
        "--gcp_project" , type=str , help="Google cloud project name. Only used for non-Colab TPU nodes." )
    parser.add_argument(
        "--bfloat16" , action="store_true" , help="Use mixed-precision bfloat16 for training. This is the recommended lower-precision format for TPU." , )
    parser.add_argument(
        "--train_dataset" , type=str , help="Path to training dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket." , )
    parser.add_argument(
        "--shuffle_buffer_size" , type=int , default=2**18 , help="Size of the shuffle buffer (in samples)" , )
    parser.add_argument(
        "--eval_dataset" , type=str , help="Path to evaluation dataset to load. If the path begins with `gs://`"
        " then the dataset will be loaded from a Google Cloud Storage bucket." , )
    parser.add_argument(
        "--num_epochs" , type=int , default=1 , help="Number of epochs to train for." , )
    parser.add_argument(
        "--learning_rate" , type=float , default=1E-4 , help="Learning rate to use for training." , )
    parser.add_argument(
        "--weight_decay_rate" , type=float , default=1E-3 , help="Weight decay rate to use for training." , )
    parser.add_argument(
        "--max_length" , type=int , default=512 , help="Maximum length of tokenized sequences. Should match the setting used in prepare_tfrecord_shards.py" , )
    parser.add_argument(
        "--mlm_probability" , type=float , default=0.15 , help="Fraction of tokens to mask during training." , )
    parser.add_argument("--output_dir" , type=str , required=True , help="Path to save model checkpoints to." )
    parser.add_argument("--hub_model_id" , type=str , help="Model ID to upload to on the Hugging Face Hub." )
    args = parser.parse_args()
    return args
def initialize_tpu( args ):
    try:
        if args.tpu_name:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver(
                args.tpu_name , zone=args.tpu_zone , project=args.gcp_project )
        else:
            tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
    except ValueError:
        raise RuntimeError(
            "Couldn't connect to TPU! Most likely you need to specify --tpu_name, --tpu_zone, or "
            "--gcp_project. When running on a TPU VM, use --tpu_name local." )
    tf.config.experimental_connect_to_cluster(tpu )
    tf.tpu.experimental.initialize_tpu_system(tpu )
    return tpu
def count_samples( file_list ) -> int:
    num_samples = 0
    for file in file_list:
        filename = file.split("/" )[-1]
        sample_count = re.search(r"-\d+-(\d+)\.tfrecord" , filename ).group(1 )
        sample_count = int(sample_count )
        num_samples += sample_count
    return num_samples
def prepare_dataset( records , decode_fn , mask_fn , batch_size , shuffle , shuffle_buffer_size=None ):
    num_samples = count_samples(records )
    dataset = tf.data.Dataset.from_tensor_slices(records )
    if shuffle:
        dataset = dataset.shuffle(len(dataset ) )
    dataset = tf.data.TFRecordDataset(dataset , num_parallel_reads=AUTO )
    # TF can't infer the total sample count because it doesn't read all the records yet, so we assert it here
    dataset = dataset.apply(tf.data.experimental.assert_cardinality(num_samples ) )
    dataset = dataset.map(decode_fn , num_parallel_calls=AUTO )
    if shuffle:
        assert shuffle_buffer_size is not None
        dataset = dataset.shuffle(shuffle_buffer_size )
    dataset = dataset.batch(batch_size , drop_remainder=True )
    dataset = dataset.map(mask_fn , num_parallel_calls=AUTO )
    dataset = dataset.prefetch(AUTO )
    return dataset
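# The ordering above (shard-level shuffle -> record decode -> sample-level shuffle ->
# batch -> mask) is the standard tf.data recipe. A toy, in-memory version of the same
# pipeline shape (a sketch; no TFRecords required):
def _toy_pipeline_demo() -> None:
    toy = tf.data.Dataset.from_tensor_slices(tf.range(10 ) )
    toy = toy.apply(tf.data.experimental.assert_cardinality(10 ) )  # fails fast on a wrong count
    toy = toy.shuffle(10 ).batch(4 , drop_remainder=True ).prefetch(AUTO )
    for batch in toy:
        print(batch.shape )  # (4,)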
def main( args ):
    if not args.no_tpu:
        tpu = initialize_tpu(args )
        strategy = tf.distribute.TPUStrategy(tpu )
    else:
        strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0" )
    if args.bfloat16:
        tf.keras.mixed_precision.set_global_policy("mixed_bfloat16" )
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer )
    config = AutoConfig.from_pretrained(args.pretrained_model_config )
    config.vocab_size = tokenizer.vocab_size
    training_records = tf.io.gfile.glob(os.path.join(args.train_dataset , "*.tfrecord" ) )
    if not training_records:
        raise ValueError(F'No .tfrecord files found in {args.train_dataset}.' )
    eval_records = tf.io.gfile.glob(os.path.join(args.eval_dataset , "*.tfrecord" ) )
    if not eval_records:
        raise ValueError(F'No .tfrecord files found in {args.eval_dataset}.' )
    num_train_samples = count_samples(training_records )
    steps_per_epoch = num_train_samples // (args.per_replica_batch_size * strategy.num_replicas_in_sync)
    total_train_steps = steps_per_epoch * args.num_epochs
    with strategy.scope():
        model = TFAutoModelForMaskedLM.from_config(config )
        model(model.dummy_inputs )  # Pass some dummy inputs through the model to ensure all the weights are built
        optimizer , schedule = create_optimizer(
            num_train_steps=total_train_steps , num_warmup_steps=total_train_steps // 20 , init_lr=args.learning_rate , weight_decay_rate=args.weight_decay_rate , )
        # Transformers models compute the right loss for their task by default when labels are passed, and will
        # use this for training unless you specify your own loss function in compile().
        model.compile(optimizer=optimizer , metrics=["accuracy"] )
    def decode_fn(example ):
        features = {
            "input_ids": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
            "attention_mask": tf.io.FixedLenFeature(dtype=tf.int64 , shape=(args.max_length,) ),
        }
        return tf.io.parse_single_example(example , features )
    # Many of the data collators in Transformers are TF-compilable when return_tensors == "tf", so we can
    # use their methods in our data pipeline.
    data_collator = DataCollatorForLanguageModeling(
        tokenizer=tokenizer , mlm_probability=args.mlm_probability , mlm=True , return_tensors="tf" )
    def mask_with_collator(batch ):
        # TF really needs an isin() function
        special_tokens_mask = (
            ~tf.cast(batch["attention_mask"] , tf.bool )
            | (batch["input_ids"] == tokenizer.cls_token_id)
            | (batch["input_ids"] == tokenizer.sep_token_id)
        )
        batch["input_ids"] , batch["labels"] = data_collator.tf_mask_tokens(
            batch["input_ids"] , vocab_size=len(tokenizer ) , mask_token_id=tokenizer.mask_token_id , special_tokens_mask=special_tokens_mask , )
        return batch
    batch_size = args.per_replica_batch_size * strategy.num_replicas_in_sync
    train_dataset = prepare_dataset(
        training_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=True , shuffle_buffer_size=args.shuffle_buffer_size , )
    eval_dataset = prepare_dataset(
        eval_records , decode_fn=decode_fn , mask_fn=mask_with_collator , batch_size=batch_size , shuffle=False , )
    callbacks = []
    if args.hub_model_id:
        callbacks.append(
            PushToHubCallback(output_dir=args.output_dir , hub_model_id=args.hub_model_id , tokenizer=tokenizer ) )
    model.fit(
        train_dataset , validation_data=eval_dataset , epochs=args.num_epochs , callbacks=callbacks , )
    model.save_pretrained(args.output_dir )
if __name__ == "__main__":
lowerCAmelCase_ : Any = parse_args()
main(args)
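# A hypothetical smoke test for the argument parser; call _smoke_test() from a REPL.
# The paths below are placeholders, and --no_tpu keeps the run away from real hardware.
def _smoke_test() -> None:
    import sys
    sys.argv = [
        "run_mlm.py",
        "--output_dir", "/tmp/mlm-test",          # placeholder path
        "--train_dataset", "gs://bucket/train",   # placeholder bucket
        "--eval_dataset", "gs://bucket/eval",
        "--no_tpu",
    ]
    args = parse_args()
    print(args.per_replica_batch_size, args.num_epochs)  # 8 1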
| 63 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_trajectory_transformer": [
"TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"TrajectoryTransformerConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_trajectory_transformer"] = [
"TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrajectoryTransformerModel",
"TrajectoryTransformerPreTrainedModel",
"load_tf_weights_in_trajectory_transformer",
]
if TYPE_CHECKING:
from .configuration_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TrajectoryTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trajectory_transformer import (
TRAJECTORY_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TrajectoryTransformerModel,
TrajectoryTransformerPreTrainedModel,
load_tf_weights_in_trajectory_transformer,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
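# _LazyModule defers the heavy torch imports until an attribute is first touched. A
# minimal standalone sketch of the same idea (hypothetical names, independent of the
# transformers implementation):
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each public attribute to the submodule that defines it.
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        # Only called when normal lookup fails: import the submodule, then cache the attribute.
        if attr not in self._attr_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        value = getattr(module, attr)
        setattr(self, attr, value)
        return value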
| 45 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMvaProcessor(ProcessorMixin ):
    """simple docstring"""
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
    def __call__( self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes: Union[List[List[int]], List[List[List[int]]]] = None , word_labels: Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True." )
        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True." )
        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )
        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]
        encoded_inputs = self.tokenizer(
            text=text if text is not None else features["words"] , text_pair=text_pair , boxes=boxes if boxes is not None else features["boxes"] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )
        # add pixel values
        images = features.pop("pixel_values" )
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images , encoded_inputs["overflow_to_sample_mapping"] )
        encoded_inputs["pixel_values"] = images
        return encoded_inputs
    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )
        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}' )
        return images_with_overflow
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
    @property
    def model_input_names( self ):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]
    @property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
    @property
    def feature_extractor( self ):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , FutureWarning , )
        return self.image_processor
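# End to end, the processor pairs an image (plus OCR output or user-supplied words and
# boxes) with tokenization. A typical call, as a sketch only: the checkpoint id is real,
# but the image path, words and boxes are made up, and apply_ocr=False is assumed so
# they can be passed in by hand:
def _processor_demo() -> None:
    from PIL import Image
    from transformers import LayoutLMv3Processor
    processor = LayoutLMv3Processor.from_pretrained("microsoft/layoutlmv3-base" , apply_ocr=False )
    image = Image.open("invoice.png" ).convert("RGB" )  # placeholder path
    words = ["Total", "42.00"]
    boxes = [[100, 100, 200, 120], [300, 100, 380, 120]]  # 0-1000 normalized coordinates
    encoding = processor(image , words , boxes=boxes , return_tensors="pt" )
    print(encoding.keys() )  # input_ids, attention_mask, bbox, pixel_values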
| 63 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE__ = {"configuration_focalnet": ["FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FocalNetConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_focalnet"] = [
"FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FocalNetForImageClassification",
"FocalNetForMaskedImageModeling",
"FocalNetBackbone",
"FocalNetModel",
"FocalNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 46 |
'''simple docstring'''
from ....utils import logging
logger = logging.get_logger(__name__)
class MMBTConfig:
    """simple docstring"""
    def __init__( self , config , num_labels=None , modal_hidden_size=20_48 ):
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
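# MMBTConfig copies the wrapped text config's attributes and adds the width of the
# modal (e.g. image) encoder. A minimal sketch, assuming a BERT backbone:
def _mmbt_config_demo() -> None:
    from transformers import BertConfig
    mmbt_config = MMBTConfig(BertConfig() , num_labels=2 , modal_hidden_size=2048 )
    print(mmbt_config.hidden_size , mmbt_config.modal_hidden_size )  # 768 2048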
| 63 | 0 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByT5TokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False
    def setUp( self ) -> None:
        '''simple docstring'''
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def t5_base_tokenizer( self ):
        '''simple docstring'''
        return ByT5Tokenizer.from_pretrained('google/byt5-small' )
    def get_tokenizer( self , **kwargs ) -> ByT5Tokenizer:
        '''simple docstring'''
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_clean_sequence( self , tokenizer , with_prefix_space=False , max_length=20 , min_length=5 ) -> Tuple[str, list]:
        '''simple docstring'''
        toks = []
        for i in range(len(tokenizer ) ):
            try:
                tok = tokenizer.decode([i] , clean_up_tokenization_spaces=False )
            except UnicodeDecodeError:
                continue
            toks.append((i, tok) )
        toks = list(filter(lambda t : re.match(r'^[ a-zA-Z]+$' , t[1] ) , toks ) )
        toks = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=False ) , toks ) )
        if max_length is not None and len(toks ) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks ) < min_length and len(toks ) > 0:
            while len(toks ) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]
        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids , clean_up_tokenization_spaces=False )
        if " " not in output_txt and len(toks_ids ) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=False )
                + ' '
                + tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=False )
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt , add_special_tokens=False )
        return output_txt, output_ids
    def test_eos_treatment( self ):
        '''simple docstring'''
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
        batch_without_eos_added = tokenizer(['hi', 'I went to the gym', ''] )
        self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
    def test_multibytes_char( self ):
        '''simple docstring'''
        tokenizer = self.t5_base_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text )
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['input_ids'] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , 'Unicode €.</s>' )
        encoded = tokenizer('e è é ê ë' )
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['input_ids'] , encoded_ids )
        # decoding
        decoded = tokenizer.decode(encoded_ids )
        self.assertEqual(decoded , 'e è é ê ë</s>' )
        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
    def test_prepare_batch_integration( self ):
        '''simple docstring'''
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        self.assertIsInstance(batch , BatchEncoding )
        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0] )
        else:
            result = list(batch.input_ids.tolist()[0] )
        self.assertListEqual(expected_src_tokens , result )
        self.assertEqual((2, 37) , batch.input_ids.shape )
        self.assertEqual((2, 37) , batch.attention_mask.shape )
    def test_empty_target_text( self ):
        '''simple docstring'''
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text , padding=True , return_tensors=FRAMEWORK )
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids' , batch )
        self.assertIn('attention_mask' , batch )
        self.assertNotIn('decoder_input_ids' , batch )
        self.assertNotIn('decoder_attention_mask' , batch )
    def test_max_length_integration( self ):
        '''simple docstring'''
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text , max_length=32 , padding='max_length' , truncation=True , return_tensors=FRAMEWORK )
        self.assertEqual(32 , targets['input_ids'].shape[1] )
    def test_eos_in_input( self ):
        '''simple docstring'''
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization. </s>']
        tgt_text = ['Summary of the text. </s>']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text , text_target=tgt_text )
        self.assertEqual(expected_src_tokens , batch['input_ids'][0] )
        self.assertEqual(expected_tgt_tokens , batch['labels'][0] )
    def test_save_and_load_tokenizer( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                shutil.rmtree(tmpdirname )
        tokenizers = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()
                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'] )
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token' )
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
                before_tokens = tokenizer.encode(sample_text , add_special_tokens=False )
                tokenizer.save_pretrained(tmpdirname )
                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname )
                after_tokens = after_tokenizer.encode(sample_text , add_special_tokens=False )
                self.assertListEqual(before_tokens , after_tokens )
                self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(tmpdirname )
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens( self ):
        '''simple docstring'''
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
                    special_tokens_map = json.load(json_file )
                with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
                    tokenizer_config = json.load(json_file )
                added_tokens_extra_ids = [f"<extra_id_{i}>" for i in range(125 )]
                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                with open(os.path.join(tmp_dir , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(special_tokens_map , outfile )
                with open(os.path.join(tmp_dir , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(tokenizer_config , outfile )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(
                    tmp_dir , )
                self.assertIn(
                    'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=False )]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir , additional_special_tokens=new_added_tokens , )
                self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
                self.assertEqual(
                    ['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
    def test_decode_single_bytes( self ):
        '''simple docstring'''
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir )
                tokenizer = tokenizer_class.from_pretrained(tmp_dir )
                self.assertTrue(tokenizer.decode([255] ) == '' )
    # tokenizer can be instantiated without any pretrained files, so no pretrained tokenizer list is needed
    def test_pretrained_model_lists( self ):
        pass
    # tokenizer does not have a vocabulary file
    def test_get_vocab( self ):
        pass
    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs( self ):
        pass
    # would test all ids in the vocab, but there is no vocab file to test against
    def test_conversion_reversible( self ):
        pass
    def test_convert_tokens_to_string_format( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers(fast=True , do_lower_case=True )
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                tokens = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
                string = tokenizer.convert_tokens_to_string(tokens )
                self.assertIsInstance(string , str )
    def test_tokenizers_common_ids_setters( self ):
        '''simple docstring'''
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}" ):
                attributes_list = [
                    'bos_token',
                    'eos_token',
                    'unk_token',
                    'sep_token',
                    'pad_token',
                    'cls_token',
                    'mask_token',
                ]
                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters , skip_special_tokens=False )
                for attr in attributes_list:
                    setattr(tokenizer , attr + '_id' , None )
                    self.assertEqual(getattr(tokenizer , attr ) , None )
                    self.assertEqual(getattr(tokenizer , attr + '_id' ) , None )
                    setattr(tokenizer , attr + '_id' , token_id_to_test_setters )
                    self.assertEqual(getattr(tokenizer , attr ) , token_to_test_setters )
                    self.assertEqual(getattr(tokenizer , attr + '_id' ) , token_id_to_test_setters )
                setattr(tokenizer , 'additional_special_tokens_ids' , [] )
                self.assertListEqual(getattr(tokenizer , 'additional_special_tokens' ) , [] )
                self.assertListEqual(getattr(tokenizer , 'additional_special_tokens_ids' ) , [] )
                setattr(tokenizer , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
                self.assertListEqual(getattr(tokenizer , 'additional_special_tokens' ) , [token_to_test_setters] )
                self.assertListEqual(getattr(tokenizer , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
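# Why the expected id lists in the tests above look the way they do: ByT5 maps each
# UTF-8 byte b to id b + 3 (ids 0-2 are reserved for pad/</s>/unk) and appends </s>
# (id 1). A quick demonstration:
def _byte_ids_demo() -> None:
    text = "A long paragraph for summarization."
    ids = [b + 3 for b in text.encode("utf-8")] + [1]
    print(ids[:5])  # [68, 35, 111, 114, 113], matching expected_src_tokens above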
| 47 |
'''simple docstring'''
def solution( n : int = 100 ) -> int:
    sum_of_squares = 0
    sum_of_ints = 0
    for i in range(1 , n + 1 ):
        sum_of_squares += i**2
        sum_of_ints += i
    return sum_of_ints**2 - sum_of_squares
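# The same quantity via the classical closed forms (sum = n(n+1)/2, sum of squares =
# n(n+1)(2n+1)/6), useful both as a check and as an O(1) alternative:
def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares

assert solution_closed_form(100) == solution(100) == 25164150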
if __name__ == "__main__":
print(f"""{solution() = }""")
| 63 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'configuration_graphormer': ['GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GraphormerConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_graphormer'] = [
'GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'GraphormerForGraphClassification',
'GraphormerModel',
'GraphormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_graphormer import GRAPHORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, GraphormerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_graphormer import (
GRAPHORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
GraphormerForGraphClassification,
GraphormerModel,
GraphormerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 48 |
'''simple docstring'''
def is_num_palindrome( num : int ) -> bool:
    if num < 0:
        return False
    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10
    return num_copy == rev_num
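# Equivalent string-based check, handy as a cross-check for the digit-reversal loop:
def is_num_palindrome_str(num: int) -> bool:
    s = str(num)
    return num >= 0 and s == s[::-1]

assert all(is_num_palindrome(n) == is_num_palindrome_str(n) for n in (0, 7, 121, 123, 1221))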
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 0 |
INSTALL_CONTENT = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
notebook_first_cells = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
black_avoid_patterns = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 49 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_tokenizers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {'configuration_gpt_neox': ['GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXConfig']}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_gpt_neox_fast'] = ['GPTNeoXTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_gpt_neox'] = [
'GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXForCausalLM',
'GPTNeoXForQuestionAnswering',
'GPTNeoXForSequenceClassification',
'GPTNeoXForTokenClassification',
'GPTNeoXLayer',
'GPTNeoXModel',
'GPTNeoXPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox import GPT_NEOX_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox import (
GPT_NEOX_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXLayer,
GPTNeoXModel,
GPTNeoXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 63 | 0 |
from __future__ import annotations
import math
def is_prime( number: int ) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, and all multiples of 3 are not primes
        return False
    # All remaining primes are of the form 6k +/- 1
    for i in range(5 , int(math.sqrt(number ) + 1 ) , 6 ):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
odd_composites = [num for num in range(3, 10_00_01, 2) if not is_prime(num)]
def compute_nums( n: int ) -> list[int]:
    if not isinstance(n , int ):
        raise ValueError('n must be an integer' )
    if n <= 0:
        raise ValueError('n must be >= 0' )
    list_nums = []
    for num in range(len(odd_composites ) ):
        i = 0
        while 2 * i * i <= odd_composites[num]:
            rem = odd_composites[num] - 2 * i * i
            if is_prime(rem ):
                break
            i += 1
        else:
            list_nums.append(odd_composites[num] )
        if len(list_nums ) == n:
            return list_nums
    return []
def solution() -> int:
    return compute_nums(1 )[0]
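# Worked check: 5777 = 53 * 109 is composite, and no prime p with 5777 = p + 2*i*i
# exists, making it the smallest counterexample to Goldbach's other conjecture
# (Project Euler 46):
def _verify_counterexample() -> None:
    assert not is_prime(5777)
    assert not any(is_prime(5777 - 2 * i * i) for i in range(1, 54))  # 2 * 54**2 > 5777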
if __name__ == "__main__":
print(F"""{solution() = }""")
| 50 |
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR = get_tests_dir('fixtures')
SAMPLE_FEATURE_EXTRACTION_CONFIG = get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SAMPLE_CONFIG = get_tests_dir('fixtures/dummy-config.json')
class AutoFeatureExtractorTest(unittest.TestCase ):
    """simple docstring"""
    def setUp( self ):
        transformers.dynamic_module_utils.TIME_OUT_REMOTE_CODE = 0
    def test_feature_extractor_from_model_shortcut( self ):
        config = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
        self.assertIsInstance(config , Wav2Vec2FeatureExtractor )
    def test_feature_extractor_from_local_directory_from_key( self ):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
        self.assertIsInstance(config , Wav2Vec2FeatureExtractor )
    def test_feature_extractor_from_local_directory_from_config( self ):
        with tempfile.TemporaryDirectory() as tmpdirname:
            model_config = Wav2Vec2Config()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            config_dict = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR ).to_dict()
            config_dict.pop("feature_extractor_type" )
            config = Wav2Vec2FeatureExtractor(**config_dict )
            # save in new folder
            model_config.save_pretrained(tmpdirname )
            config.save_pretrained(tmpdirname )
            config = AutoFeatureExtractor.from_pretrained(tmpdirname )
            # make sure private variable is not incorrectly saved
            dict_as_saved = json.loads(config.to_json_string() )
            self.assertTrue("_processor_class" not in dict_as_saved )
            self.assertIsInstance(config , Wav2Vec2FeatureExtractor )
    def test_feature_extractor_from_local_file( self ):
        config = AutoFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG )
        self.assertIsInstance(config , Wav2Vec2FeatureExtractor )
    def test_repo_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , "bert-base is not a local folder and is not a valid model identifier" ):
            _ = AutoFeatureExtractor.from_pretrained("bert-base" )
    def test_revision_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            _ = AutoFeatureExtractor.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="aaaaaa" )
    def test_feature_extractor_not_found( self ):
        with self.assertRaisesRegex(
            EnvironmentError , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
            _ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
    def test_from_pretrained_dynamic_feature_extractor( self ):
        # If remote code is not set, we will time out when asking whether to load the model.
        with self.assertRaises(ValueError ):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor" )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(ValueError ):
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=False )
        feature_extractor = AutoFeatureExtractor.from_pretrained(
            "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=True )
        self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(tmp_dir )
            reloaded_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir , trust_remote_code=True )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
    def test_new_feature_extractor_registration( self ):
        try:
            AutoConfig.register("custom" , CustomConfig )
            AutoFeatureExtractor.register(CustomConfig , CustomFeatureExtractor )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(ValueError ):
                AutoFeatureExtractor.register(Wav2Vec2Config , Wav2Vec2FeatureExtractor )
            # Now that the config is registered, it can be used as any other config with the auto-API
            feature_extractor = CustomFeatureExtractor.from_pretrained(SAMPLE_FEATURE_EXTRACTION_CONFIG_DIR )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(tmp_dir )
                new_feature_extractor = AutoFeatureExtractor.from_pretrained(tmp_dir )
                self.assertIsInstance(new_feature_extractor , CustomFeatureExtractor )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def test_from_pretrained_dynamic_feature_extractor_conflict( self ):
        class NewFeatureExtractor(Wav2Vec2FeatureExtractor ):
            is_local = True
        try:
            AutoConfig.register("custom" , CustomConfig )
            AutoFeatureExtractor.register(CustomConfig , NewFeatureExtractor )
            # If remote code is not set, the default is to use local
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor" )
            self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=False )
            self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            feature_extractor = AutoFeatureExtractor.from_pretrained(
                "hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=True )
            self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
            self.assertTrue(not hasattr(feature_extractor , "is_local" ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
| 63 | 0 |
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("0.12.2"):
raise Exception("requires fairseq >= 0.12.2")
if version.parse(fairseq.__version__) > version.parse("2"):
raise Exception("requires fairseq < v2")
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = "Hello, World!"
SAMPLE_LANGUAGE = "en_XX"
def convert_xmod_checkpoint_to_pytorch(xmod_checkpoint_path: str , pytorch_dump_folder_path: str , classification_head: bool ) -> None:
    """simple docstring"""
    data_dir = Path('''data_bin''' )
    xmod = FairseqXmodModel.from_pretrained(
        model_name_or_path=str(Path(xmod_checkpoint_path ).parent ) , checkpoint_file=Path(xmod_checkpoint_path ).name , _name='''xmod_base''' , arch='''xmod_base''' , task='''multilingual_masked_lm''' , data_name_or_path=str(data_dir ) , bpe='''sentencepiece''' , sentencepiece_model=str(Path(xmod_checkpoint_path ).parent / '''sentencepiece.bpe.model''' ) , src_dict=str(data_dir / '''dict.txt''' ) , )
    xmod.eval()  # disable dropout
    print(xmod )
    xmod_sent_encoder = xmod.model.encoder.sentence_encoder
    config = XmodConfig(
        vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1E-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , '''bottleneck''' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
    if classification_head:
        config.num_labels = xmod.model.classification_heads['''mnli'''].out_proj.weight.shape[0]
    print('''Our X-MOD config:''' , config )
    model = XmodForSequenceClassification(config ) if classification_head else XmodForMaskedLM(config )
    model.eval()
    # Now let's copy all the weights.
    # Embeddings
    model.roberta.embeddings.word_embeddings.weight = xmod_sent_encoder.embed_tokens.weight
    model.roberta.embeddings.position_embeddings.weight = xmod_sent_encoder.embed_positions.weight
    model.roberta.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        model.roberta.embeddings.token_type_embeddings.weight )  # just zero them out b/c xmod doesn't use them.
    model.roberta.embeddings.LayerNorm.weight = xmod_sent_encoder.layernorm_embedding.weight
    model.roberta.embeddings.LayerNorm.bias = xmod_sent_encoder.layernorm_embedding.bias
    for i in range(config.num_hidden_layers ):
        # Encoder: start of layer
        layer = model.roberta.encoder.layer[i]
        xmod_layer = xmod_sent_encoder.layers[i]
        # self attention
        self_attn = layer.attention.self
        if not (
            xmod_layer.self_attn.k_proj.weight.data.shape
            == xmod_layer.self_attn.q_proj.weight.data.shape
            == xmod_layer.self_attn.v_proj.weight.data.shape
            == torch.Size((config.hidden_size, config.hidden_size) )
        ):
            raise AssertionError('''Dimensions of self-attention weights do not match.''' )
        self_attn.query.weight.data = xmod_layer.self_attn.q_proj.weight
        self_attn.query.bias.data = xmod_layer.self_attn.q_proj.bias
        self_attn.key.weight.data = xmod_layer.self_attn.k_proj.weight
        self_attn.key.bias.data = xmod_layer.self_attn.k_proj.bias
        self_attn.value.weight.data = xmod_layer.self_attn.v_proj.weight
        self_attn.value.bias.data = xmod_layer.self_attn.v_proj.bias
        # self-attention output
        self_output = layer.attention.output
        if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
            raise AssertionError('''Dimensions of self-attention output weights do not match.''' )
        self_output.dense.weight = xmod_layer.self_attn.out_proj.weight
        self_output.dense.bias = xmod_layer.self_attn.out_proj.bias
        self_output.LayerNorm.weight = xmod_layer.self_attn_layer_norm.weight
        self_output.LayerNorm.bias = xmod_layer.self_attn_layer_norm.bias
        # intermediate (fairseq's fc1)
        intermediate = layer.intermediate
        if intermediate.dense.weight.shape != xmod_layer.fc1.weight.shape:
            raise AssertionError('''Dimensions of intermediate weights do not match.''' )
        intermediate.dense.weight = xmod_layer.fc1.weight
        intermediate.dense.bias = xmod_layer.fc1.bias
        # output (fairseq's fc2)
        bert_output = layer.output
        if bert_output.dense.weight.shape != xmod_layer.fc2.weight.shape:
            raise AssertionError('''Dimensions of feed-forward weights do not match.''' )
        bert_output.dense.weight = xmod_layer.fc2.weight
        bert_output.dense.bias = xmod_layer.fc2.bias
        bert_output.LayerNorm.weight = xmod_layer.final_layer_norm.weight
        bert_output.LayerNorm.bias = xmod_layer.final_layer_norm.bias
        if bert_output.adapter_layer_norm is not None:
            bert_output.adapter_layer_norm.weight = xmod_layer.adapter_layer_norm.weight
            bert_output.adapter_layer_norm.bias = xmod_layer.adapter_layer_norm.bias
        if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
            raise AssertionError('''Lists of language adapters do not match.''' )
        for lang_code, adapter in xmod_layer.adapter_modules.items():
            to_adapter = bert_output.adapter_modules[lang_code]
            from_adapter = xmod_layer.adapter_modules[lang_code]
            to_adapter.dense1.weight = from_adapter.fc1.weight
            to_adapter.dense1.bias = from_adapter.fc1.bias
            to_adapter.dense2.weight = from_adapter.fc2.weight
            to_adapter.dense2.bias = from_adapter.fc2.bias
        # end of layer
    if xmod_sent_encoder.layer_norm is not None:
        model.roberta.encoder.LayerNorm.weight = xmod_sent_encoder.layer_norm.weight
        model.roberta.encoder.LayerNorm.bias = xmod_sent_encoder.layer_norm.bias
    if classification_head:
        model.classifier.dense.weight = xmod.model.classification_heads['''mnli'''].dense.weight
        model.classifier.dense.bias = xmod.model.classification_heads['''mnli'''].dense.bias
        model.classifier.out_proj.weight = xmod.model.classification_heads['''mnli'''].out_proj.weight
        model.classifier.out_proj.bias = xmod.model.classification_heads['''mnli'''].out_proj.bias
    else:
        # LM Head
        model.lm_head.dense.weight = xmod.model.encoder.lm_head.dense.weight
        model.lm_head.dense.bias = xmod.model.encoder.lm_head.dense.bias
        model.lm_head.layer_norm.weight = xmod.model.encoder.lm_head.layer_norm.weight
        model.lm_head.layer_norm.bias = xmod.model.encoder.lm_head.layer_norm.bias
        model.lm_head.decoder.weight = xmod.model.encoder.lm_head.weight
        model.lm_head.decoder.bias = xmod.model.encoder.lm_head.bias
    # Let's check that we get the same results.
    input_ids = xmod.encode(SAMPLE_TEXT ).unsqueeze(0 )  # batch of size 1
    model.roberta.set_default_language(SAMPLE_LANGUAGE )
    our_output = model(input_ids )[0]
    if classification_head:
        their_output = xmod.model.classification_heads['''mnli'''](xmod.extract_features(input_ids ) )
    else:
        their_output = xmod.model(input_ids , lang_id=[SAMPLE_LANGUAGE] )[0]
    print(our_output.shape , their_output.shape )
    max_absolute_diff = torch.max(torch.abs(our_output - their_output ) ).item()
    print(F"""max_absolute_diff = {max_absolute_diff}""" )  # ~ 1e-7
    success = torch.allclose(our_output , their_output , atol=1E-3 )
    print('''Do both models output the same tensors?''' , '''🔥''' if success else '''💩''' )
    if not success:
        raise Exception('''Something went wRoNg''' )
    Path(pytorch_dump_folder_path ).mkdir(parents=True , exist_ok=True )
    print(F"""Saving model to {pytorch_dump_folder_path}""" )
    model.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--xmod_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--classification_head", action="store_true", help="Whether to convert a final classification head."
)
    args = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 51 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bigcode/gpt_bigcode-santacoder': 'https://huggingface.co/bigcode/gpt_bigcode-santacoder/resolve/main/config.json',
}
class GPTBigCodeConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = 'gpt_bigcode'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'n_embd',
        'max_position_embeddings': 'n_positions',
        'num_attention_heads': 'n_head',
        'num_hidden_layers': 'n_layer',
    }
    def __init__( self , vocab_size=5_02_57 , n_positions=10_24 , n_embd=7_68 , n_layer=12 , n_head=12 , n_inner=None , activation_function="gelu_pytorch_tanh" , resid_pdrop=0.1 , embd_pdrop=0.1 , attn_pdrop=0.1 , layer_norm_epsilon=1e-5 , initializer_range=0.02 , scale_attn_weights=True , use_cache=True , bos_token_id=5_02_56 , eos_token_id=5_02_56 , attention_softmax_in_fp32=True , scale_attention_softmax_in_fp32=True , multi_query=True , **kwargs , ):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.scale_attn_weights = scale_attn_weights
        self.use_cache = use_cache
        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
        self.scale_attention_softmax_in_fp32 = scale_attention_softmax_in_fp32
        self.multi_query = multi_query
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
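# A minimal sketch of the attribute_map indirection above: the generic names used across
# transformers resolve to the GPT-2 style field names of this config.
def _config_demo() -> None:
    config = GPTBigCodeConfig(n_embd=10_24 , n_layer=8 , n_head=16 )
    print(config.hidden_size , config.num_hidden_layers , config.num_attention_heads )  # 1024 8 16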
| 63 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout() -> None:
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
        with pytest.raises(RequestWouldHangIndefinitelyError ):
            requests.request("GET" , "https://huggingface.co" )
        with pytest.raises(requests.exceptions.ConnectTimeout ):
            requests.request("GET" , "https://huggingface.co" , timeout=1.0 )
@pytest.mark.integration
def test_offline_with_connection_error() -> None:
    with offline(OfflineSimulationMode.CONNECTION_FAILS ):
        with pytest.raises(requests.exceptions.ConnectionError ):
            requests.request("GET" , "https://huggingface.co" )
def test_offline_with_datasets_offline_mode_enabled() -> None:
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
        with pytest.raises(ConnectionError ):
            http_head("https://huggingface.co" )
| 52 |
'''simple docstring'''
def perfect_cube( n : int ) -> bool:
    val = round(n ** (1 / 3) )
    return (val * val * val) == n
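# Rounding matters here: without it, 27 ** (1 / 3) evaluates to 3.0000000000000004 and
# the naive cube-and-compare check wrongly reports False for 27.
assert perfect_cube(27) and perfect_cube(64) and not perfect_cube(65)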
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 63 | 0 |
'''simple docstring'''
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class MockLaunchConfig( SageMakerConfig ):
    """simple docstring"""
    compute_environment = ComputeEnvironment.AMAZON_SAGEMAKER
    fp16 = True
    ec2_instance_type = "ml.p3.2xlarge"
    iam_role_name = "accelerate_sagemaker_execution_role"
    profile = "hf-sm"
    region = "us-east-1"
    num_machines = 1
    base_job_name = "accelerate-sagemaker-1"
    pytorch_version = "1.6"
    transformers_version = "4.4"
    training_script = "train.py"
    success_training_script_args = [
"--model_name_or_path",
"bert",
"--do_train",
"False",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
    fail_training_script_args = [
"--model_name_or_path",
"bert",
"--do_train",
"--do_test",
"False",
"--do_predict",
"--epochs",
"3",
"--learning_rate",
"5e-5",
"--max_steps",
"50.5",
]
class SageMakerLaunchTest( unittest.TestCase ):
    """simple docstring"""
    def test_args_convert( self ):
        # If no defaults are changed, `to_kwargs` returns an empty dict.
        converted_args = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        assert isinstance(converted_args['model_name_or_path'] , str )
        assert isinstance(converted_args['do_train'] , bool )
        assert isinstance(converted_args['epochs'] , int )
        assert isinstance(converted_args['learning_rate'] , float )
        assert isinstance(converted_args['max_steps'] , float )
        with pytest.raises(ValueError ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
| 53 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'ut/deta': 'https://huggingface.co/ut/deta/resolve/main/config.json',
}
class __SCREAMING_SNAKE_CASE (PretrainedConfig ):
"""simple docstring"""
__a ='deta'
__a ={
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : List[str] , __a : List[str]=None , __a : Dict=9_00 , __a : str=20_48 , __a : Tuple=6 , __a : List[str]=20_48 , __a : str=8 , __a : Union[str, Any]=6 , __a : int=10_24 , __a : List[Any]=8 , __a : Dict=0.0 , __a : Tuple=True , __a : Optional[Any]="relu" , __a : Tuple=2_56 , __a : Optional[Any]=0.1 , __a : int=0.0 , __a : List[Any]=0.0 , __a : Optional[int]=0.02 , __a : str=1.0 , __a : Dict=True , __a : Dict=False , __a : Optional[int]="sine" , __a : Any=5 , __a : List[str]=4 , __a : Optional[int]=4 , __a : List[str]=True , __a : str=3_00 , __a : int=True , __a : int=True , __a : Tuple=1 , __a : Optional[int]=5 , __a : Tuple=2 , __a : Dict=1 , __a : Optional[int]=1 , __a : Any=5 , __a : Optional[int]=2 , __a : Dict=0.1 , __a : str=0.25 , **__a : Tuple , ):
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
_a = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"] )
else:
if isinstance(__a , __a ):
_a = backbone_config.pop("model_type" )
_a = CONFIG_MAPPING[backbone_model_type]
_a = config_class.from_dict(__a )
_a = backbone_config
_a = num_queries
_a = max_position_embeddings
_a = d_model
_a = encoder_ffn_dim
_a = encoder_layers
_a = encoder_attention_heads
_a = decoder_ffn_dim
_a = decoder_layers
_a = decoder_attention_heads
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = activation_function
_a = init_std
_a = init_xavier_std
_a = encoder_layerdrop
_a = auxiliary_loss
_a = position_embedding_type
# deformable attributes
_a = num_feature_levels
_a = encoder_n_points
_a = decoder_n_points
_a = two_stage
_a = two_stage_num_proposals
_a = with_box_refine
_a = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("If two_stage is True, with_box_refine must be True." )
# Hungarian matcher
_a = class_cost
_a = bbox_cost
_a = giou_cost
# Loss coefficients
_a = mask_loss_coefficient
_a = dice_loss_coefficient
_a = bbox_loss_coefficient
_a = giou_loss_coefficient
_a = eos_coefficient
_a = focal_alpha
super().__init__(is_encoder_decoder=__a , **__a )
    @property
    def num_attention_heads(self ):
        return self.encoder_attention_heads

    @property
    def hidden_size(self ):
        return self.d_model

    def to_dict(self ):
        _a = copy.deepcopy(self.__dict__ )
        _a["backbone_config"] = self.backbone_config.to_dict()
        _a["model_type"] = self.__class__.model_type
        return _a
| 63 | 0 |
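The backbone handling above follows a common composed-config pattern: a plain dict carries a "model_type" key that selects the config class it should be deserialized with. A toy version of that dispatch, with class and registry names invented for illustration:

CONFIG_REGISTRY = {}


class ToyBackboneConfig:
    model_type = "toy_resnet"

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    @classmethod
    def from_dict(cls, config_dict):
        return cls(**config_dict)


CONFIG_REGISTRY[ToyBackboneConfig.model_type] = ToyBackboneConfig


def resolve_backbone(backbone_config):
    # Dicts are dispatched on their "model_type"; config objects pass through.
    if isinstance(backbone_config, dict):
        backbone_config = dict(backbone_config)  # don't mutate the caller's dict
        model_type = backbone_config.pop("model_type")
        return CONFIG_REGISTRY[model_type].from_dict(backbone_config)
    return backbone_config


cfg = resolve_backbone({"model_type": "toy_resnet", "out_features": ["stage2", "stage3"]})
print(cfg.out_features)  # ['stage2', 'stage3']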
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''tokenizer_file''': {
        '''EleutherAI/gpt-neox-20b''': '''https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json''',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''gpt-neox-20b''': 2048,
}
class UpperCamelCase_ ( PreTrainedTokenizerFast ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , unk_token="<|endoftext|>" , bos_token="<|endoftext|>" , eos_token="<|endoftext|>" , add_prefix_space=False , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , add_prefix_space=add_prefix_space , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def _build_conversation_input_ids(self , conversation: "Conversation" ) -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
| 54 |
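The `_build_conversation_input_ids` method above implements a simple left-truncation policy: append an EOS after every turn, then keep only the most recent `model_max_length` tokens. The same logic, isolated from the tokenizer:

def build_conversation_ids_sketch(turn_token_ids, eos_token_id, model_max_length):
    # Concatenate every turn with a trailing EOS, then keep the newest tokens.
    input_ids = []
    for ids in turn_token_ids:
        input_ids.extend(ids + [eos_token_id])
    if len(input_ids) > model_max_length:
        input_ids = input_ids[-model_max_length:]
    return input_ids


print(build_conversation_ids_sketch([[1, 2], [3, 4, 5]], 0, 4))  # [3, 4, 5, 0]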
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs
) -> None:
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="train" , **kwargs )
    pad = tok.pad_token_id

    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch["labels"].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens

    train_lens = get_lens(train_ds )
    val_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path="val" , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
| 63 | 0 |
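The pickled length files produced above exist so a sampler can group examples of similar length and waste less padding. A minimal sketch of that downstream use:

def make_length_sorted_batches(lengths, batch_size):
    # Sort example indices by length so each batch pads to a similar size.
    idxs = sorted(range(len(lengths)), key=lambda i: lengths[i])
    return [idxs[i : i + batch_size] for i in range(0, len(idxs), batch_size)]


print(make_length_sorted_batches([7, 3, 9, 3, 5], 2))  # [[1, 3], [4, 0], [2]]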
'''simple docstring'''
import unittest
from transformers import MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING, AutoTokenizer, is_vision_available
from transformers.pipelines import pipeline
from transformers.pipelines.document_question_answering import apply_tesseract
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_detectron2,
require_pytesseract,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
from transformers.image_utils import load_image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args , **kwargs ):
            """simple docstring"""
            pass

    def load_image(_ ):
        return None
# This is a pinned image from a specific revision of a document question answering space, hosted by HuggingFace,
# so we can expect it to be available.
INVOICE_URL = (
"""https://huggingface.co/spaces/impira/docquery/resolve/2f6c96314dc84dfda62d40de9da55f2f5165d403/invoice.png"""
)
@is_pipeline_test
@require_torch
@require_vision
class snake_case ( unittest.TestCase ):
"""simple docstring"""
    model_mapping = MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING
@require_pytesseract
@require_vision
    def get_test_pipeline(self , model , tokenizer , processor ):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering" , model=model , tokenizer=tokenizer , image_processor=processor )
        image = INVOICE_URL
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , "" ) ) )
        question = "What is the placebo?"
        examples = [
            {
                "image": load_image(image ),
                "question": question,
            },
            {
                "image": image,
                "question": question,
            },
            {
                "image": image,
                "question": question,
                "word_boxes": word_boxes,
            },
        ]
        return dqa_pipeline, examples
    def run_pipeline_test(self , dqa_pipeline , examples ):
        """simple docstring"""
        outputs = dqa_pipeline(examples , top_k=2 )
        self.assertEqual(
            outputs , [
                [
                    {"score": ANY(float ), "answer": ANY(str ), "start": ANY(int ), "end": ANY(int )},
                    {"score": ANY(float ), "answer": ANY(str ), "start": ANY(int ), "end": ANY(int )},
                ]
            ]
            * 3 , )
@require_torch
@require_detectron2
@require_pytesseract
    def test_small_model_pt(self ):
        """simple docstring"""
        dqa_pipeline = pipeline("document-question-answering" , model="hf-internal-testing/tiny-random-layoutlmv2" )
        image = INVOICE_URL
        question = "How many cats are there?"
        expected_output = [
            {"score": 0.0_001, "answer": "oy 2312/2019", "start": 38, "end": 39},
            {"score": 0.0_001, "answer": "oy 2312/2019 DUE", "start": 38, "end": 40},
        ]
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )
        outputs = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , expected_output )
        # This image does not detect ANY text in it, meaning layoutlmv2 should fail.
        # Empty answer probably
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(outputs , [] )
        # We can optionally pass directly the words and bounding boxes
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        words = []
        boxes = []
        outputs = dqa_pipeline(image=image , question=question , words=words , boxes=boxes , top_k=2 )
        self.assertEqual(outputs , [] )
@slow
@require_torch
@require_detectron2
@require_pytesseract
    def test_large_model_pt(self ):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        outputs = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {"score": 0.9_944, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0_009, "answer": "us-001", "start": 16, "end": 16},
                ],
            ]
            * 2 , )
@slow
@require_torch
@require_detectron2
@require_pytesseract
    def test_large_model_pt_chunk(self ):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering" , model="tiennvcs/layoutlmv2-base-uncased-finetuned-docvqa" , revision="9977165" , max_seq_len=50 , )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        outputs = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
                {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {"score": 0.9_974, "answer": "1110212019", "start": 23, "end": 23},
                    {"score": 0.9_948, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2 , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm(self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=True )
        dqa_pipeline = pipeline(
            "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=tokenizer , revision="3dc6de3" , )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
            ] , )
        outputs = dqa_pipeline({"image": image, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
            ] , )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
                ]
            ]
            * 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , "" ) ) )
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.4_251, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.0_819, "answer": "1110212019", "start": 23, "end": 23},
            ] , )
@slow
@require_torch
@require_pytesseract
@require_vision
    def test_large_model_pt_layoutlm_chunk(self ):
        """simple docstring"""
        tokenizer = AutoTokenizer.from_pretrained(
            "impira/layoutlm-document-qa" , revision="3dc6de3" , add_prefix_space=True )
        dqa_pipeline = pipeline(
            "document-question-answering" , model="impira/layoutlm-document-qa" , tokenizer=tokenizer , revision="3dc6de3" , max_seq_len=50 , )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
            ] , )
        outputs = dqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                [
                    {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
                    {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
                ]
            ]
            * 2 , )
        word_boxes = list(zip(*apply_tesseract(load_image(image ) , None , "" ) ) )
        # This model should also work if `image` is set to None
        outputs = dqa_pipeline({"image": None, "word_boxes": word_boxes, "question": question} , top_k=2 )
        self.assertEqual(
            nested_simplify(outputs , decimals=4 ) , [
                {"score": 0.9_999, "answer": "us-001", "start": 16, "end": 16},
                {"score": 0.9_998, "answer": "us-001", "start": 16, "end": 16},
            ] , )
@slow
@require_torch
    def test_large_model_pt_donut(self ):
        """simple docstring"""
        dqa_pipeline = pipeline(
            "document-question-answering" , model="naver-clova-ix/donut-base-finetuned-docvqa" , tokenizer=AutoTokenizer.from_pretrained("naver-clova-ix/donut-base-finetuned-docvqa" ) , feature_extractor="naver-clova-ix/donut-base-finetuned-docvqa" , )
        image = INVOICE_URL
        question = "What is the invoice number?"
        outputs = dqa_pipeline(image=image , question=question , top_k=2 )
        self.assertEqual(nested_simplify(outputs , decimals=4 ) , [{"answer": "us-001"}] )
@require_tf
@unittest.skip("Document question answering not implemented in TF" )
def snake_case ( self ):
"""simple docstring"""
pass
| 55 |
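The assertions above lean on `nested_simplify` to make float-heavy pipeline outputs comparable. A sketch of the underlying idea (the real helper in transformers.testing_utils handles more types):

def nested_round(obj, decimals=4):
    # Recursively round floats so model outputs can be compared for equality.
    if isinstance(obj, float):
        return round(obj, decimals)
    if isinstance(obj, dict):
        return {k: nested_round(v, decimals) for k, v in obj.items()}
    if isinstance(obj, (list, tuple)):
        return type(obj)(nested_round(v, decimals) for v in obj)
    return obj


print(nested_round([{"score": 0.99437, "answer": "us-001"}]))
# [{'score': 0.9944, 'answer': 'us-001'}]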
'''simple docstring'''
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class __SCREAMING_SNAKE_CASE (unittest.TestCase ):
"""simple docstring"""
def UpperCamelCase__ ( self : str ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(_a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertTrue(is_safetensors_compatible(_a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
"unet/diffusion_pytorch_model.bin",
# Removed: 'unet/diffusion_pytorch_model.safetensors',
]
self.assertFalse(is_safetensors_compatible(_a ) )
def UpperCamelCase__ ( self : List[str] ):
_a = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
self.assertTrue(is_safetensors_compatible(_a ) )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = [
"safety_checker/pytorch_model.bin",
"safety_checker/model.safetensors",
"vae/diffusion_pytorch_model.bin",
"vae/diffusion_pytorch_model.safetensors",
"text_encoder/pytorch_model.bin",
# Removed: 'text_encoder/model.safetensors',
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
self.assertFalse(is_safetensors_compatible(_a ) )
def UpperCamelCase__ ( self : str ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Any ):
_a = [
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Any ):
# pass variant but use the non-variant filenames
_a = [
"unet/diffusion_pytorch_model.bin",
"unet/diffusion_pytorch_model.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Optional[Any] ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
"unet/diffusion_pytorch_model.fp16.bin",
# Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
]
_a = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Dict ):
_a = [
"text_encoder/pytorch_model.fp16.bin",
"text_encoder/model.fp16.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : List[str] ):
# pass variant but use the non-variant filenames
_a = [
"text_encoder/pytorch_model.bin",
"text_encoder/model.safetensors",
]
_a = "fp16"
self.assertTrue(is_safetensors_compatible(__a , variant=__a ) )
def UpperCamelCase__ ( self : Optional[int] ):
_a = [
"safety_checker/pytorch_model.fp16.bin",
"safety_checker/model.fp16.safetensors",
"vae/diffusion_pytorch_model.fp16.bin",
"vae/diffusion_pytorch_model.fp16.safetensors",
"text_encoder/pytorch_model.fp16.bin",
# 'text_encoder/model.fp16.safetensors',
"unet/diffusion_pytorch_model.fp16.bin",
"unet/diffusion_pytorch_model.fp16.safetensors",
]
_a = "fp16"
self.assertFalse(is_safetensors_compatible(__a , variant=__a ) )
| 63 | 0 |
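The cases above all reduce to one pairing rule: every torch `.bin` weight file must have a safetensors counterpart. A deliberately simplified sketch of that rule (the real check also matches variant infixes like `.fp16`):

def safetensors_compatible_sketch(filenames):
    # Every component folder that ships a .bin must also ship a .safetensors.
    torch_folders = {f.rsplit("/", 1)[0] for f in filenames if f.endswith(".bin")}
    st_folders = {f.rsplit("/", 1)[0] for f in filenames if f.endswith(".safetensors")}
    return torch_folders <= st_folders


print(safetensors_compatible_sketch(["unet/diffusion_pytorch_model.bin"]))  # False
print(safetensors_compatible_sketch([
    "unet/diffusion_pytorch_model.bin",
    "unet/diffusion_pytorch_model.safetensors",
]))  # True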
'''simple docstring'''
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = 'CompVis/stable-diffusion-v1-1'
pipe2_model_id = 'CompVis/stable-diffusion-v1-2'
pipe3_model_id = 'CompVis/stable-diffusion-v1-3'
pipe4_model_id = 'CompVis/stable-diffusion-v1-4'
class a ( DiffusionPipeline ):
    def __init__( self , vae : AutoencoderKL , text_encoder : CLIPTextModel , tokenizer : CLIPTokenizer , unet : UNet2DConditionModel , scheduler : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , safety_checker : StableDiffusionSafetyChecker , feature_extractor : CLIPImageProcessor , requires_safety_checker : bool = True , ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id )
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id )
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id )
        self.pipe4 = StableDiffusionPipeline(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , safety_checker=safety_checker , feature_extractor=feature_extractor , requires_safety_checker=requires_safety_checker , )
        self.register_modules(pipeline1=self.pipe1 , pipeline2=self.pipe2 , pipeline3=self.pipe3 , pipeline4=self.pipe4 )
@property
def A_ ( self : Optional[int] ):
        return {k: getattr(self , k ) for k in self.config.keys() if not k.startswith('''_''' )}
    def enable_attention_slicing(self , slice_size : Optional[Union[str, int]] = "auto" ):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )

    def disable_attention_slicing(self ):
        self.enable_attention_slicing(None )
@torch.no_grad()
    def text2img_sd1_1( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        return self.pipe1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_2( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        return self.pipe2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_3( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        return self.pipe3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def text2img_sd1_4( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        return self.pipe4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
@torch.no_grad()
    def __call__( self , prompt : Union[str, List[str]] , height : int = 512 , width : int = 512 , num_inference_steps : int = 50 , guidance_scale : float = 7.5 , negative_prompt : Optional[Union[str, List[str]]] = None , num_images_per_prompt : Optional[int] = 1 , eta : float = 0.0 , generator : Optional[torch.Generator] = None , latents : Optional[torch.FloatTensor] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , callback : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , callback_steps : int = 1 , **kwargs , ):
        device = '''cuda''' if torch.cuda.is_available() else '''cpu'''
        self.to(device )
        # Checks if the height and width are divisible by 8 or not
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(F"`height` and `width` must be divisible by 8 but are {height} and {width}." )
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(
            prompt=prompt , height=height , width=width , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , output_type=output_type , return_dict=return_dict , callback=callback , callback_steps=callback_steps , **kwargs , )
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]] )
| 56 |
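Hypothetical usage of the comparison pipeline above through diffusers' community-pipeline mechanism. The `custom_pipeline` id is an assumption, and this downloads four full checkpoints:

from diffusers import DiffusionPipeline

# Assumed community-pipeline id; heavy download (four checkpoints).
pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4", custom_pipeline="stable_diffusion_comparison"
)
pipe.enable_attention_slicing()
result = pipe(prompt="an astronaut riding a horse", num_inference_steps=25)
# result.images holds one image per checkpoint, v1.1 through v1.4.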
'''simple docstring'''
def base16_encode ( lowercase : bytes ) -> str:
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(lowercase )] )


def base16_decode ( lowercase : str ) -> bytes:
    # Check data validity, following RFC3548
    # https://www.ietf.org/rfc/rfc3548.txt
    if (len(lowercase ) % 2) != 0:
        raise ValueError(
            "Base16 encoded data is invalid:\nData does not have an even number of hex digits." )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(lowercase ) <= set("0123456789ABCDEF" ):
        raise ValueError(
            "Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters." )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(lowercase[i] + lowercase[i + 1] , 16 ) for i in range(0 , len(lowercase ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 63 | 0 |
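A quick round-trip check of the two helpers above:

encoded = base16_encode(b"Hello World!")
print(encoded)                 # 48656C6C6F20576F726C6421
print(base16_decode(encoded))  # b'Hello World!'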
"""simple docstring"""
A : int = "\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n"
A : List[Any] = [{"type": "code", "content": INSTALL_CONTENT}]
A : List[str] = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| 57 |
'''simple docstring'''
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def check_model_parameters ( model_a , model_b , did_step , iteration ):
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def step_model ( model , input , target , accelerator , do_backward=True ):
    model.train()
    output = model(input )
    loss = F.mse_loss(output , target.to(output.device ) )
    if not do_backward:
        loss /= accelerator.gradient_accumulation_steps
        loss.backward()
    else:
        accelerator.backward(loss )
def get_training_setup ( accelerator , sched=False ):
    set_seed(42 )
    model = RegressionModel()
    ddp_model = deepcopy(model )
    dset = RegressionDataset(length=80 )
    dataloader = DataLoader(dset , batch_size=16 )
    model.to(accelerator.device )
    if sched:
        opt = AdamW(params=model.parameters() , lr=1E-3 )
        ddp_opt = AdamW(params=ddp_model.parameters() , lr=1E-3 )
        sched = LambdaLR(opt , lr_lambda=lambda epoch : epoch**0.65 )
        ddp_sched = LambdaLR(ddp_opt , lr_lambda=lambda epoch : epoch**0.65 )
    # Make a copy of `model`
    if sched:
        ddp_model , ddp_opt , ddp_sched , dataloader = accelerator.prepare(ddp_model , ddp_opt , ddp_sched , dataloader )
    else:
        ddp_model , dataloader = accelerator.prepare(ddp_model , dataloader )
    if sched:
        return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
    return model, ddp_model, dataloader
def test_noop_sync ( accelerator ):
    # Test when on a single CPU or GPU that the context manager does nothing
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
        check_model_parameters(model , ddp_model , True , iteration )
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            assert torch.allclose(
                param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_distributed_sync ( accelerator ):
    # Test on distributed setup that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
    # Use a single batch
    ddp_input , ddp_target = next(iter(dataloader ) ).values()
    for iteration in range(3 ):
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator )
        # Do "gradient accumulation" (noop)
        if iteration % 2 == 0:
            # Accumulate grads locally
            with accelerator.no_sync(ddp_model ):
                step_model(ddp_model , ddp_input , ddp_target , accelerator )
        else:
            # Sync grads
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if iteration % 2 == 0:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
            else:
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
def test_gradient_accumulation ( split_batches=False , dispatch_batches=False ):
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , ddp_model , dataloader = get_training_setup(accelerator )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        step_model(model , input , target , accelerator , False )
        # Do "gradient accumulation" (noop)
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
        # DDP model and model should only be in sync when not (iteration % 2 == 0)
        for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
            if not param.requires_grad:
                continue
            if ((iteration + 1) % 2 == 0) or (iteration == len(dataloader ) - 1):
                # Grads should be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is True
                ), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
            else:
                # Grads should not be in sync
                assert (
                    torch.allclose(param.grad , ddp_param.grad ) is False
                ), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
        ddp_input = ddp_input[torch.randperm(len(ddp_input ) )]
    GradientState._reset_state()
def test_gradient_accumulation_with_opt_and_scheduler ( split_batches=False , dispatch_batches=False ):
    accelerator = Accelerator(
        split_batches=split_batches , dispatch_batches=dispatch_batches , gradient_accumulation_steps=2 )
    # Test that context manager behaves properly
    model , opt , sched , dataloader , ddp_model , ddp_opt , ddp_sched = get_training_setup(accelerator , True )
    for iteration, batch in enumerate(dataloader ):
        ddp_input , ddp_target = batch.values()
        # Gather the distributed inputs and targs for the base model
        input , target = accelerator.gather((ddp_input, ddp_target) )
        input , target = input.to(accelerator.device ), target.to(accelerator.device )
        # Perform our initial ground truth step in non "DDP"
        model.train()
        ddp_model.train()
        step_model(model , input , target , accelerator , False )
        opt.step()
        if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(dataloader )):
            if split_batches:
                sched.step()
            else:
                for _ in range(accelerator.num_processes ):
                    sched.step()
        opt.zero_grad()
        # Perform gradient accumulation under wrapper
        with accelerator.accumulate(ddp_model ):
            step_model(ddp_model , ddp_input , ddp_target , accelerator )
            ddp_opt.step()
            ddp_sched.step()
            ddp_opt.zero_grad()
        # Learning rates should be the same
        assert (
            opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
        ), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
        did_step = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(dataloader ))
        if accelerator.num_processes > 1:
            check_model_parameters(model , ddp_model , did_step , iteration )
        # Shuffle ddp_input on each iteration
        torch.manual_seed(1337 + iteration )
    GradientState._reset_state()
def test_dataloader_break ():
    accelerator = Accelerator()
    first_dset = RegressionDataset(length=80 )
    first_dataloader = DataLoader(first_dset , batch_size=16 )
    second_dset = RegressionDataset(length=96 )
    second_dataloader = DataLoader(second_dset , batch_size=16 )
    first_dataloader , second_dataloader = accelerator.prepare(first_dataloader , second_dataloader )
    assert accelerator.gradient_state.active_dataloader is None
    for iteration, _ in enumerate(first_dataloader ):
        assert id(accelerator.gradient_state.active_dataloader ) == id(first_dataloader )
        if iteration < len(first_dataloader ) - 1:
            assert not accelerator.gradient_state.end_of_dataloader
            if iteration == 1:
                for batch_num, _ in enumerate(second_dataloader ):
                    assert id(accelerator.gradient_state.active_dataloader ) == id(second_dataloader )
                    if batch_num < len(second_dataloader ) - 1:
                        assert not accelerator.gradient_state.end_of_dataloader
                    else:
                        assert accelerator.gradient_state.end_of_dataloader
        else:
            assert accelerator.gradient_state.end_of_dataloader
    assert accelerator.gradient_state.active_dataloader is None
def main ():
    accelerator = Accelerator()
    state = accelerator.state
    if state.local_process_index == 0:
        print("**Test `accumulate` gradient accumulation with dataloader break**" )
    test_dataloader_break()
    if state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print("**Test NOOP `no_sync` context manager**" )
        test_noop_sync(accelerator )
    if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
        if state.local_process_index == 0:
            print("**Test Distributed `no_sync` context manager**" )
        test_distributed_sync(accelerator )
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
                test_gradient_accumulation(split_batch , dispatch_batches )
    # Currently will break on torch 2.0 +, need to investigate why
    if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
        if state.local_process_index == 0:
            print(
                "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
        test_gradient_accumulation_with_opt_and_scheduler()
    if state.distributed_type == DistributedType.MULTI_GPU:
        for split_batch in [True, False]:
            for dispatch_batches in [True, False]:
                if not split_batch and not dispatch_batches:
                    continue
                if state.local_process_index == 0:
                    print(
                        "**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
                test_gradient_accumulation_with_opt_and_scheduler(split_batch , dispatch_batches )
def _mp_fn ( index ):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 63 | 0 |
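The pattern these tests exercise, reduced to its usual training-loop shape: wrap each step in `accelerator.accumulate(model)` and let Accelerate decide when gradients actually sync and when the optimizer step takes effect:

from accelerate import Accelerator


def train_sketch(model, optimizer, dataloader, loss_fn):
    accelerator = Accelerator(gradient_accumulation_steps=2)
    model, optimizer, dataloader = accelerator.prepare(model, optimizer, dataloader)
    for batch, target in dataloader:
        with accelerator.accumulate(model):
            loss = loss_fn(model(batch), target)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()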
'''simple docstring'''
def solution ( __lowerCamelCase : int = 1000 ) -> int:
    prev_numerator , prev_denominator = 1, 1
    result = []
    for i in range(1 , __lowerCamelCase + 1 ):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )
if __name__ == "__main__":
print(f"""{solution() = }""")
| 58 |
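The recurrence in `solution` comes from the continued fraction sqrt(2) = 1 + 1/(2 + 1/(2 + ...)): if one expansion is p/q, the next is (p + 2q)/(p + q). The first few expansions, checked directly:

p, q = 1, 1
for _ in range(4):
    p, q = p + 2 * q, p + q
    print(f"{p}/{q}")  # 3/2, 7/5, 17/12, 41/29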
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ : Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase_ : List[str] = {
'microsoft/trocr-base-handwritten': (
'https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json'
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class __SCREAMING_SNAKE_CASE (lowerCamelCase_ ):
"""simple docstring"""
__a ='trocr'
__a =['past_key_values']
__a ={
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self : Optional[int] , __a : Any=5_02_65 , __a : Optional[int]=10_24 , __a : List[Any]=12 , __a : str=16 , __a : int=40_96 , __a : Optional[Any]="gelu" , __a : Union[str, Any]=5_12 , __a : Dict=0.1 , __a : List[str]=0.0 , __a : Union[str, Any]=0.0 , __a : Any=2 , __a : Union[str, Any]=0.02 , __a : Any=0.0 , __a : List[str]=True , __a : Optional[Any]=False , __a : Union[str, Any]=True , __a : Optional[Any]=True , __a : Any=1 , __a : List[Any]=0 , __a : Any=2 , **__a : Optional[Any] , ):
_a = vocab_size
_a = d_model
_a = decoder_layers
_a = decoder_attention_heads
_a = decoder_ffn_dim
_a = activation_function
_a = max_position_embeddings
_a = dropout
_a = attention_dropout
_a = activation_dropout
_a = init_std
_a = decoder_layerdrop
_a = use_cache
_a = scale_embedding
_a = use_learned_position_embeddings
_a = layernorm_embedding
super().__init__(
pad_token_id=__a , bos_token_id=__a , eos_token_id=__a , decoder_start_token_id=__a , **__a , )
| 63 | 0 |
import math
def malus_law ( initial_intensity : float , angle : float ) -> float:
    if initial_intensity < 0:
        raise ValueError("The value of intensity cannot be negative" )
        # handling of negative values of initial intensity
    if angle < 0 or angle > 360:
        raise ValueError("In Malus Law, the angle is in the range 0-360 degrees" )
        # handling of values out of allowed range
    return initial_intensity * (math.cos(math.radians(angle ) ) ** 2)
if __name__ == "__main__":
import doctest
doctest.testmod(name="""malus_law""")
| 59 |
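Malus's law says the transmitted intensity is I = I0 * cos^2(theta), so a polarizer at 60 degrees passes a quarter of the incoming light:

print(malus_law(100.0, 60))  # ~25.0 (up to floating-point error)
print(malus_law(100.0, 0))   # 100.0 (polarizer aligned with the light)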
'''simple docstring'''
import argparse
import os
import re
PATH_TO_AUTO_MODULE = 'src/transformers/models/auto'

# re pattern that matches mapping introductions:
# SUPER_MODEL_MAPPING_NAMES = OrderedDict or SUPER_MODEL_MAPPING = OrderedDict
_re_intro_mapping = re.compile(R'[A-Z_]+_MAPPING(\s+|_[A-Z_]+\s+)=\s+OrderedDict')
# re pattern that matches identifiers in mappings
_re_identifier = re.compile(R'\s*\(\s*"(\S[^"]+)"')


def sort_auto_mapping ( fname , overwrite : bool = False ):
    with open(fname , "r" , encoding="utf-8" ) as f:
        content = f.read()

    lines = content.split("\n" )
    new_lines = []
    line_idx = 0
    while line_idx < len(lines ):
        if _re_intro_mapping.search(lines[line_idx] ) is not None:
            indent = len(re.search(r"^(\s*)\S" , lines[line_idx] ).groups()[0] ) + 8
            # Start of a new mapping!
            while not lines[line_idx].startswith(" " * indent + "(" ):
                new_lines.append(lines[line_idx] )
                line_idx += 1
            blocks = []
            while lines[line_idx].strip() != "]":
                # Blocks either fit in one line or not
                if lines[line_idx].strip() == "(":
                    start_idx = line_idx
                    while not lines[line_idx].startswith(" " * indent + ")" ):
                        line_idx += 1
                    blocks.append("\n".join(lines[start_idx : line_idx + 1] ) )
                else:
                    blocks.append(lines[line_idx] )
                    line_idx += 1
            # Sort blocks by their identifiers
            blocks = sorted(blocks , key=lambda x : _re_identifier.search(x ).groups()[0] )
            new_lines += blocks
        else:
            new_lines.append(lines[line_idx] )
            line_idx += 1

    if overwrite:
        with open(fname , "w" , encoding="utf-8" ) as f:
            f.write("\n".join(new_lines ) )
    elif "\n".join(new_lines ) != content:
        return True


def sort_all_auto_mappings ( overwrite : bool = False ):
    fnames = [os.path.join(PATH_TO_AUTO_MODULE , f ) for f in os.listdir(PATH_TO_AUTO_MODULE ) if f.endswith(".py" )]
    diffs = [sort_auto_mapping(fname , overwrite=overwrite ) for fname in fnames]

    if not overwrite and any(diffs ):
        failures = [f for f, d in zip(fnames , diffs ) if d]
        raise ValueError(
            F'The following files have auto mappings that need sorting: {", ".join(failures )}. Run `make style` to fix'
            " this." )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--check_only', action='store_true', help='Whether to only check or fix style.')
    args = parser.parse_args()

    sort_all_auto_mappings(not args.check_only)
| 63 | 0 |
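What the sorter enforces, in miniature: entries inside each mapping ordered by their quoted identifier.

before = [
    '        ("gpt2", "GPT2Config"),',
    '        ("bert", "BertConfig"),',
]
after = sorted(before, key=lambda line: _re_identifier.search(line).groups()[0])
print(after[0].strip())  # ("bert", "BertConfig"),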